response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
string : str
An astronomical year string
attr_name : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value | def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
string : str
An astronomical year string
attr_name : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True |
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value | def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
"""
if config is None:
config = {}
if config.get("version_1_1_or_later"):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get("version_1_2_or_later", False),
has_colon=config.get("version_1_2_or_later", False),
)
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get("verify", "ignore") == "exception":
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get("verify", "ignore") == "warn":
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True |
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid | def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid
"""
global _ucd_singleton
if _ucd_singleton is None:
_ucd_singleton = UCDWords()
if has_colon:
m = re.search(r"[^A-Za-z0-9_.:;\-]", ucd)
else:
m = re.search(r"[^A-Za-z0-9_.;\-]", ucd)
if m is not None:
raise ValueError(f"UCD has invalid character '{m.group(0)}' in '{ucd}'")
word_component_re = r"[A-Za-z0-9][A-Za-z0-9\-_]*"
word_re = rf"{word_component_re}(\.{word_component_re})*"
parts = ucd.split(";")
words = []
for i, word in enumerate(parts):
colon_count = word.count(":")
if colon_count == 1:
ns, word = word.split(":", 1)
if not re.match(word_component_re, ns):
raise ValueError(f"Invalid namespace '{ns}'")
ns = ns.lower()
elif colon_count > 1:
raise ValueError(f"Too many colons in '{word}'")
else:
ns = "ivoa"
if not re.match(word_re, word):
raise ValueError(f"Invalid word '{word}'")
if ns == "ivoa" and check_controlled_vocabulary:
if i == 0:
if not _ucd_singleton.is_primary(word):
if _ucd_singleton.is_secondary(word):
raise ValueError(
f"Secondary word '{word}' is not valid as a primary word"
)
else:
raise ValueError(f"Unknown word '{word}'")
else:
if not _ucd_singleton.is_secondary(word):
if _ucd_singleton.is_primary(word):
raise ValueError(
f"Primary word '{word}' is not valid as a secondary word"
)
else:
raise ValueError(f"Unknown word '{word}'")
try:
normalized_word = _ucd_singleton.normalize_capitalization(word)
except KeyError:
normalized_word = word
words.append((ns, normalized_word))
return words |
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool | def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool
"""
if ucd is None:
return True
try:
parse_ucd(
ucd,
check_controlled_vocabulary=check_controlled_vocabulary,
has_colon=has_colon,
)
except ValueError:
return False
return True |
Returns a writable file-like object suitable for streaming output.
Parameters
----------
fd : str or file-like
May be:
- a file path string, in which case it is opened, and the file
object is returned.
- an object with a :meth:``write`` method, in which case that
object is returned.
compressed : bool, optional
If `True`, create a gzip-compressed file. (Default is `False`).
Returns
-------
fd : :term:`file-like (writeable)` | def convert_to_writable_filelike(fd, compressed=False):
"""
Returns a writable file-like object suitable for streaming output.
Parameters
----------
fd : str or file-like
May be:
- a file path string, in which case it is opened, and the file
object is returned.
- an object with a :meth:``write`` method, in which case that
object is returned.
compressed : bool, optional
If `True`, create a gzip-compressed file. (Default is `False`).
Returns
-------
fd : :term:`file-like (writeable)`
"""
if isinstance(fd, str):
fd = os.path.expanduser(fd)
if fd.endswith(".gz") or compressed:
with gzip.GzipFile(filename=fd, mode="wb") as real_fd:
encoded_fd = io.TextIOWrapper(real_fd, encoding="utf8")
yield encoded_fd
encoded_fd.flush()
real_fd.flush()
return
else:
with open(fd, "w", encoding="utf8") as real_fd:
yield real_fd
return
elif hasattr(fd, "write"):
assert callable(fd.write)
if compressed:
fd = gzip.GzipFile(fileobj=fd, mode="wb")
# If we can't write Unicode strings, use a codecs.StreamWriter
# object
needs_wrapper = False
try:
fd.write("")
except TypeError:
needs_wrapper = True
if not hasattr(fd, "encoding") or fd.encoding is None:
needs_wrapper = True
if needs_wrapper:
yield codecs.getwriter("utf-8")(fd)
else:
yield fd
fd.flush()
if isinstance(fd, gzip.GzipFile):
fd.close()
return
else:
raise TypeError("Can not be coerced to writable file-like object") |
Coerces and/or verifies the object *p* into a valid range-list-format parameter.
As defined in `Section 8.7.2 of Simple
Spectral Access Protocol
<http://www.ivoa.net/documents/REC/DAL/SSA-20080201.html>`_.
Parameters
----------
p : str or sequence
May be a string as passed verbatim to the service expecting a
range-list, or a sequence. If a sequence, each item must be
either:
- a numeric value
- a named value, such as, for example, 'J' for named
spectrum (if the *numeric* kwarg is False)
- a 2-tuple indicating a range
- the last item my be a string indicating the frame of
reference
frames : sequence of str, optional
A sequence of acceptable frame of reference keywords. If not
provided, the default set in ``set_reference_frames`` will be
used.
numeric : bool, optional
TODO
Returns
-------
parts : tuple
The result is a tuple:
- a string suitable for passing to a service as a range-list
argument
- an integer counting the number of elements | def coerce_range_list_param(p, frames=None, numeric=True):
"""
Coerces and/or verifies the object *p* into a valid range-list-format parameter.
As defined in `Section 8.7.2 of Simple
Spectral Access Protocol
<http://www.ivoa.net/documents/REC/DAL/SSA-20080201.html>`_.
Parameters
----------
p : str or sequence
May be a string as passed verbatim to the service expecting a
range-list, or a sequence. If a sequence, each item must be
either:
- a numeric value
- a named value, such as, for example, 'J' for named
spectrum (if the *numeric* kwarg is False)
- a 2-tuple indicating a range
- the last item my be a string indicating the frame of
reference
frames : sequence of str, optional
A sequence of acceptable frame of reference keywords. If not
provided, the default set in ``set_reference_frames`` will be
used.
numeric : bool, optional
TODO
Returns
-------
parts : tuple
The result is a tuple:
- a string suitable for passing to a service as a range-list
argument
- an integer counting the number of elements
"""
def str_or_none(x):
if x is None:
return ""
if numeric:
x = float(x)
return str(x)
def numeric_or_range(x):
if isinstance(x, tuple) and len(x) == 2:
return f"{str_or_none(x[0])}/{str_or_none(x[1])}"
else:
return str_or_none(x)
def is_frame_of_reference(x):
return isinstance(x, str)
if p is None:
return None, 0
elif isinstance(p, (tuple, list)):
has_frame_of_reference = len(p) > 1 and is_frame_of_reference(p[-1])
if has_frame_of_reference:
points = p[:-1]
else:
points = p[:]
out = ",".join([numeric_or_range(x) for x in points])
length = len(points)
if has_frame_of_reference:
if frames is not None and p[-1] not in frames:
raise ValueError(f"'{p[-1]}' is not a valid frame of reference")
out += ";" + p[-1]
length += 1
return out, length
elif isinstance(p, str):
number = r"([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)?"
if not numeric:
number = r"(" + number + ")|([A-Z_]+)"
match = re.match(
"^" + number + r"([,/]" + number + r")+(;(?P<frame>[<A-Za-z_0-9]+))?$", p
)
if match is None:
raise ValueError(f"'{p}' is not a valid range list")
frame = match.groupdict()["frame"]
if frames is not None and frame is not None and frame not in frames:
raise ValueError(f"{frame!r} is not a valid frame of reference")
return p, p.count(",") + p.count(";") + 1
try:
float(p)
return str(p), 1
except TypeError:
raise ValueError(f"'{p}' is not a valid range list") |
Compare two VOTable version identifiers. | def version_compare(a, b):
"""
Compare two VOTable version identifiers.
"""
def version_to_tuple(v):
if v[0].lower() == "v":
v = v[1:]
return Version(v)
av = version_to_tuple(a)
bv = version_to_tuple(b)
# Can't use cmp because it was removed from Python 3.x
return (av > bv) - (av < bv) |
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *ID*
is not a valid XML ID_.
*name* is the name of the attribute being checked (used only for
error messages). | def check_id(ID, name="ID", config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *ID*
is not a valid XML ID_.
*name* is the name of the attribute being checked (used only for
error messages).
"""
if ID is not None and not xml_check.check_id(ID):
warn_or_raise(W02, W02, (name, ID), config, pos)
return False
return True |
Given an arbitrary string, create one that can be used as an xml id.
This is rather simplistic at the moment, since it just replaces
non-valid characters with underscores. | def fix_id(ID, config=None, pos=None):
"""
Given an arbitrary string, create one that can be used as an xml id.
This is rather simplistic at the moment, since it just replaces
non-valid characters with underscores.
"""
if ID is None:
return None
corrected = xml_check.fix_id(ID)
if corrected != ID:
vo_warn(W03, (ID, corrected), config, pos)
return corrected |
Raises a `ValueError` if *token* is not a valid XML token.
As defined by XML Schema Part 2. | def check_token(token, attr_name, config=None, pos=None):
"""
Raises a `ValueError` if *token* is not a valid XML token.
As defined by XML Schema Part 2.
"""
if token is not None and not xml_check.check_token(token):
return False
return True |
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*content_type* is not a valid MIME content type.
As defined by RFC 2045 (syntactically, at least). | def check_mime_content_type(content_type, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*content_type* is not a valid MIME content type.
As defined by RFC 2045 (syntactically, at least).
"""
if content_type is not None and not xml_check.check_mime_content_type(content_type):
warn_or_raise(W04, W04, content_type, config, pos)
return False
return True |
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*uri* is not a valid URI.
As defined in RFC 2396. | def check_anyuri(uri, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*uri* is not a valid URI.
As defined in RFC 2396.
"""
if uri is not None and not xml_check.check_anyuri(uri):
warn_or_raise(W05, W05, uri, config, pos)
return False
return True |
Validates the given file against the appropriate VOTable schema.
Parameters
----------
filename : str
The path to the XML file to validate
version : str, optional
The VOTABLE version to check, which must be a string "1.0",
"1.1", "1.2" or "1.3". If it is not one of these,
version "1.1" is assumed.
For version "1.0", it is checked against a DTD, since that
version did not have an XML Schema.
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings | def validate_schema(filename, version="1.1"):
"""
Validates the given file against the appropriate VOTable schema.
Parameters
----------
filename : str
The path to the XML file to validate
version : str, optional
The VOTABLE version to check, which must be a string \"1.0\",
\"1.1\", \"1.2\" or \"1.3\". If it is not one of these,
version \"1.1\" is assumed.
For version \"1.0\", it is checked against a DTD, since that
version did not have an XML Schema.
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
"""
if version not in ("1.0", "1.1", "1.2", "1.3"):
log.info(f"{filename} has version {version}, using schema 1.1")
version = "1.1"
if version in ("1.1", "1.2", "1.3"):
schema_path = data.get_pkg_data_filename(f"data/VOTable.v{version}.xsd")
else:
schema_path = data.get_pkg_data_filename("data/VOTable.dtd")
return validate.validate_schema(filename, schema_path) |
see Pull Request 4782 or Issue 4781 for details. | def test_gemini_v1_2():
"""
see Pull Request 4782 or Issue 4781 for details.
"""
table = parse_single_table(get_pkg_data_filename("data/gemini.xml"))
assert table is not None
tt = table.to_table()
assert (
tt["access_url"][0]
== "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/GEMINI/"
"S20120515S0064?runid=bx9b1o8cvk1qesrt"
) |
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory. | def home_is_data(monkeypatch):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path("data")
# For Unix
monkeypatch.setenv("HOME", path)
# For Windows
monkeypatch.setenv("USERPROFILE", path) |
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory. | def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path)) |
Issue #927. | def test_table_read_with_unnamed_tables():
"""
Issue #927.
"""
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable")
assert len(t) == 1 |
Testing when votable is passed as pathlib.Path object #4412. | def test_votable_path_object():
"""
Testing when votable is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename("data/names.xml"))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266 |
Test to write and read VOTable with Parquet serialization | def test_read_write_votable_parquet(tmp_path, overwrite):
"""
Test to write and read VOTable with Parquet serialization
"""
# Create some fake data
number_of_objects = 10
ids = [f"COSMOS_{ii:03g}" for ii in range(number_of_objects)]
redshift = np.random.uniform(low=0, high=3, size=number_of_objects)
mass = np.random.uniform(low=1e8, high=1e10, size=number_of_objects)
sfr = np.random.uniform(low=1, high=100, size=number_of_objects)
astropytab = Table([ids, redshift, mass, sfr], names=["id", "z", "mass", "sfr"])
# Create Column metadata
column_metadata = {
"id": {"unit": "", "ucd": "meta.id", "utype": "none"},
"z": {"unit": "", "ucd": "src.redshift", "utype": "none"},
"mass": {"unit": "solMass", "ucd": "phys.mass", "utype": "none"},
"sfr": {"unit": "solMass / yr", "ucd": "phys.SFR", "utype": "none"},
}
# Write VOTable with Parquet serialization
filename = tmp_path / "test_votable_parquet.vot"
astropytab.write(
filename,
column_metadata=column_metadata,
overwrite=overwrite,
format="votable.parquet",
)
# Check both files are written out
assert set(os.listdir(tmp_path)) == {
"test_votable_parquet.vot",
"test_votable_parquet.vot.parquet",
}
# Open created VOTable with Parquet serialization
with warnings.catch_warnings():
warnings.simplefilter("always", ResourceWarning)
votable = parse(filename)
# Get table out
votable_table = votable.resources[0].tables[0].array
# compare tables
assert (astropytab == votable_table).all()
# Compare metadata
# Note: VOTable parses empty units ("") as "---". This is
# taken into account below by .replace("---","").
saved_bool = []
for kk, key in enumerate(column_metadata.keys()):
for tag in column_metadata[key].keys():
saved_bool.append(
column_metadata[key][tag]
== str(
eval(f"votable.resources[0].tables[0].fields[{kk}].{tag}")
).replace("---", "")
)
assert np.asarray(saved_bool).all() |
Issue #8995. | def test_binary2_masked_strings():
"""
Issue #8995.
"""
# Read a VOTable which sets the null mask bit for each empty string value.
votable = parse(get_pkg_data_filename("data/binary2_masked_strings.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
# Ensure string columns have no masked values and can be written out
assert not np.any(table.array.mask["epoch_photometry_url"])
output = io.BytesIO()
astropy_table.write(output, format="votable") |
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable. | def test_validate_output_invalid():
"""
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable.
"""
# A votable with errors
invalid_votable_filepath = get_pkg_data_filename("data/regression.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(invalid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known error string
assert "E02: Incorrect number of elements in array." in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(invalid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is not valid)
assert validate_out is False |
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable. | def test_validate_output_valid():
"""
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable.
"""
# A valid votable. (Example from the votable standard:
# https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html )
valid_votable_filepath = get_pkg_data_filename("data/valid_votable.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(valid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known good output string
assert "astropy.io.votable found no violations" in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(valid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is valid)
assert validate_out is True |
A version 1.4 VOTable must use the same namespace as 1.3.
(see https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC16). | def test_namespace_warning():
"""
A version 1.4 VOTable must use the same namespace as 1.3.
(see https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC16).
"""
bad_namespace = b"""<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.4"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
"""
with pytest.warns(W41):
parse(io.BytesIO(bad_namespace), verify="exception")
good_namespace_14 = b"""<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
"""
parse(io.BytesIO(good_namespace_14), verify="exception")
good_namespace_13 = b"""<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.3" xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
"""
parse(io.BytesIO(good_namespace_13), verify="exception") |
VOTableFile.__init__ allows versions of '1.1', '1.2', '1.3' and '1.4'.
VOTableFile.__init__ does not allow version of '1.0' anymore and now raises a ValueError as it does to other versions not supported. | def test_version():
"""
VOTableFile.__init__ allows versions of '1.1', '1.2', '1.3' and '1.4'.
VOTableFile.__init__ does not allow version of '1.0' anymore and now raises a ValueError as it does to other versions not supported.
"""
# Exercise the checks in __init__
for version in ("1.1", "1.2", "1.3", "1.4"):
VOTableFile(version=version)
for version in ("0.9", "1.0", "2.0"):
with pytest.raises(
ValueError, match=r"should be in \('1.1', '1.2', '1.3', '1.4'\)."
):
VOTableFile(version=version)
# Exercise the checks in the setter
vot = VOTableFile()
for version in ("1.1", "1.2", "1.3", "1.4"):
vot.version = version
for version in ("1.0", "2.0"):
with pytest.raises(
ValueError, match=r"supports VOTable versions '1.1', '1.2', '1.3', '1.4'$"
):
vot.version = version
# Exercise the checks in the parser.
begin = b'<?xml version="1.0" encoding="utf-8"?><VOTABLE version="'
middle = b'" xmlns="http://www.ivoa.net/xml/VOTable/v'
end = (
b'" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><RESOURCE/></VOTABLE>'
)
# Valid versions
for bversion in (b"1.1", b"1.2", b"1.3"):
parse(
io.BytesIO(begin + bversion + middle + bversion + end), verify="exception"
)
parse(io.BytesIO(begin + b"1.4" + middle + b"1.3" + end), verify="exception")
if PYTEST_LT_8_0:
ctx = nullcontext()
else:
ctx = pytest.warns(W41)
# Invalid versions
for bversion in (b"1.0", b"2.0"):
with pytest.warns(W21), ctx:
parse(
io.BytesIO(begin + bversion + middle + bversion + end),
verify="exception",
) |
Utility squashing XML fragment to easier their comparison
This function is only used in the test module. It was more convenient for comparing the xml. | def _squash_xml(data):
"""
Utility squashing XML fragment to easier their comparison
This function is only used in the test module. It was more convenient for comparing the xml.
"""
return data.replace(" ", "").replace("\n", "").replace('"', "").replace("'", "") |
Construct a MIVOT block with wrong tag to test the expected exception | def test_mivot_constructor():
"""
Construct a MIVOT block with wrong tag to test the expected exception
"""
with pytest.raises(ValueError, match="not well-formed"):
MivotBlock(
"""
<VODML xmlns="http://www.ivoa.net/xml/mivot" >
<REPORT status="OK">Unit test mivot block1</REPORT>
<WRONG TAG>
</GLOBALS>
</VODML>
"""
) |
Test the MIVOT block extraction from a file against a reference block stored in data | def test_mivot_readout():
"""
Test the MIVOT block extraction from a file against a reference block stored in data
"""
votable = parse(get_pkg_data_filename("data/mivot_annotated_table.xml"))
ref_data = ""
for resource in votable.resources:
with open(
get_pkg_data_filename("data/mivot_block_custom_datatype.xml")
) as reference:
ref_data = reference.read()
assert _squash_xml(ref_data) == _squash_xml(resource.mivot_block.content)
assert len(resource.tables) == 1 |
Build a VOTable, put a MIVOT block in the first resource, checks it can be retrieved
as well as the following table | def test_mivot_write():
"""
Build a VOTable, put a MIVOT block in the first resource, checks it can be retrieved
as well as the following table
"""
mivot_block = MivotBlock(
"""
<VODML xmlns="http://www.ivoa.net/xml/mivot" >
<REPORT status="OK">
Unit test mivot block1
</REPORT>
<GLOBALS>
</GLOBALS>
</VODML>
"""
)
vtf = VOTableFile()
mivot_resource = Resource()
mivot_resource.type = "meta"
mivot_resource.mivot_block = mivot_block
# pack the meta resource in a top level resource
r1 = Resource()
r1.type = "results"
r1.resources.append(mivot_resource)
vtf.resources.append(r1)
# Push the VOTable in an IOSTream (emulates a disk saving)
buff = io.BytesIO()
vtf.to_xml(buff)
# Read the IOStream (emulates a disk readout)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 1
for resource in vtf2.resources:
assert _squash_xml(mivot_block.content) == _squash_xml(
resource.mivot_block.content
)
assert len(resource.tables) == 0 |
Build a VOTable, put a MIVOT block and a table in the first resource, checks it can be retrieved
as well as the following table | def test_mivot_write_after_table():
"""
Build a VOTable, put a MIVOT block and a table in the first resource, checks it can be retrieved
as well as the following table
"""
mivot_block = MivotBlock(
"""
<VODML xmlns="http://www.ivoa.net/xml/mivot" >
<REPORT status="OK">Unit test mivot block1</REPORT>
<GLOBALS>
</GLOBALS>
</VODML>
"""
)
vtf = VOTableFile()
mivot_resource = Resource()
mivot_resource.type = "meta"
mivot_resource.mivot_block = mivot_block
# pack the meta resource in a top level resource
r1 = Resource()
r1.type = "results"
i1 = tree.Info(name="test_name", value="test_value")
r1.infos.append(i1)
r1.resources.append(mivot_resource)
t1 = tree.TableElement(vtf)
t1.name = "t1"
r1.tables.append(t1)
vtf.resources.append(r1)
# Push the VOTable in an IOSTream (emulates a disk saving)
buff = io.BytesIO()
vtf.to_xml(buff)
# Read the IOStream (emulates a disk readout)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 1
for resource in vtf2.resources:
assert _squash_xml(mivot_block.content) == _squash_xml(
resource.mivot_block.content
)
assert len(resource.tables) == 1 |
Build a VOTable, put an empty MIVOT block in the first resource, checks it can be retrieved
as well as the following table | def test_write_no_mivot():
"""
Build a VOTable, put an empty MIVOT block in the first resource, checks it can be retrieved
as well as the following table
"""
vtf = VOTableFile()
mivot_resource = Resource()
mivot_resource.type = "meta"
# pack the meta resource in a top level resource
r1 = Resource()
r1.type = "results"
r1.resources.append(mivot_resource)
t1 = tree.TableElement(vtf)
t1.name = "t1"
r1.tables.append(t1)
vtf.resources.append(r1)
# Push the VOTable in an IOSTream (emulates a disk saving)
buff = io.BytesIO()
vtf.to_xml(buff)
# Read the IOStream (emulates a disk readout)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 1
for resource in vtf2.resources:
assert (
_squash_xml(resource.mivot_block.content)
== "<VODMLxmlns=http://www.ivoa.net/xml/mivot><REPORTstatus=KO>NoMivotblock</REPORT></VODML>"
)
assert len(resource.tables) == 1 |
Build a VOTable, put a MIVOT block in the first resource after another meta resource,
checks it can be retrieved as well as the following table | def test_mivot_write_after_resource():
"""
Build a VOTable, put a MIVOT block in the first resource after another meta resource,
checks it can be retrieved as well as the following table
"""
mivot_block = MivotBlock(
"""
<VODML xmlns="http://www.ivoa.net/xml/mivot" >
<REPORT status="OK">Unit test mivot block1</REPORT>
<GLOBALS>
</GLOBALS>
</VODML>
"""
)
vtf = VOTableFile()
mivot_resource = Resource()
mivot_resource.type = "meta"
mivot_resource.mivot_block = mivot_block
# pack the meta resource in a top level resource
r1 = Resource()
r1.type = "results"
i1 = tree.Info(name="test_name", value="test_value")
r1.infos.append(i1)
meta_resource = Resource()
meta_resource.type = "meta"
r1.resources.append(meta_resource)
r1.resources.append(mivot_resource)
t1 = tree.TableElement(vtf)
t1.name = "t1"
r1.tables.append(t1)
vtf.resources.append(r1)
# Push the VOTable in an IOSTream (emulates a disk saving)
buff = io.BytesIO()
vtf.to_xml(buff)
# Read the IOStream (emulates a disk readout)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 1
for resource in vtf2.resources:
assert _squash_xml(mivot_block.content) == _squash_xml(
resource.mivot_block.content
)
assert len(resource.tables) == 1 |
Build a meta resource containing a MIVOT block,
build the dummy MIVOT block first. | def test_mivot_forbidden_write():
"""
Build a meta resource containing a MIVOT block,
build the dummy MIVOT block first.
"""
mivot_block = MivotBlock(
"""
<VODML xmlns="http://www.ivoa.net/xml/mivot" >
<REPORT status="KO">Unit test mivot block1</REPORT>
<GLOBALS/>
</VODML>
"""
)
# package the MIVOT block in the resource
mivot_resource = Resource()
mivot_resource.type = "results"
with pytest.raises(E26):
# A MIVOT block must be with "type=meta"
mivot_resource.mivot_block = mivot_block |
Build a VOTable with 2 resources containing MivotBlock, parse it, and write it in a file.
Then compare it with another file to see if the order of the elements in a resource is respected,
in particular the MivotBlock which should be before the tables. | def test_mivot_order(tmp_path):
"""
Build a VOTable with 2 resources containing MivotBlock, parse it, and write it in a file.
Then compare it with another file to see if the order of the elements in a resource is respected,
in particular the MivotBlock which should be before the tables.
"""
mivot_block = MivotBlock(
"""
<VODML xmlns="http://www.ivoa.net/xml/mivot" >
</VODML>
"""
)
vtf = VOTableFile()
mivot_resource = Resource()
mivot_resource.type = "meta"
mivot_resource.mivot_block = mivot_block
mivot_resource2 = Resource()
mivot_resource2.type = "meta"
mivot_resource2.mivot_block = mivot_block
# R1 : 2 mivot_block, 2 tables, 1 description, 1 info, 1 CooSys
r1 = Resource()
r1.type = "results"
t1 = tree.TableElement(vtf)
t1.name = "t1"
t2 = tree.TableElement(vtf)
t2.name = "t2"
r1.tables.append(t1)
r1.tables.append(t2)
r1.resources.append(mivot_resource)
r1.resources.append(mivot_resource2)
cs = tree.CooSys(ID="_XYZ", system="ICRS")
r1.coordinate_systems.append(cs)
i1 = tree.Info(name="test_name", value="test_value")
r1.infos.append(i1)
vtf.resources.append(r1)
# R2 : 1 resource "results", 1 mivot_block and 1 table
r2 = Resource()
r2.type = "results"
r3 = Resource()
r3.type = "results"
t3 = tree.TableElement(vtf)
t3.name = "t3"
r2.tables.append(t3)
r2.resources.append(mivot_resource)
r2.resources.append(r3)
vtf.resources.append(r2)
# Push the VOTable in an IOSTream (emulates a disk saving)
buff = io.BytesIO()
vtf.to_xml(buff)
# Read the IOStream (emulates a disk readout)
buff.seek(0)
vtf2 = parse(buff)
vpath = get_pkg_data_filename("data/test.order.xml")
vpath_out = str(tmp_path / "test.order.out.xml")
vtf2.to_xml(vpath_out)
# We want to remove the xml header from the VOTable
with open(vpath_out) as file:
lines = file.readlines()
# The xml header is on 2 lines (line 2 and 3)
del lines[1]
del lines[1]
with open(vpath_out, "w") as file:
file.writelines(lines)
assert filecmp.cmp(vpath, vpath_out) |
test_path_object is needed for test below ``test_validate_path_object``
so that file could be passed as pathlib.Path object. | def test_validate(test_path_object=False):
"""
test_path_object is needed for test below ``test_validate_path_object``
so that file could be passed as pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename("data/regression.xml")
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/validation.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output |
Validating when source is passed as path object (#4412). | def test_validate_path_object():
"""Validating when source is passed as path object (#4412)."""
test_validate(test_path_object=True) |
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of str, optional
If provided, is a list of HTTP urls to download VOTable files
from. If not provided, a built-in set of ~22,000 urls
compiled by HEASARC will be used.
destdir : path-like, optional
The directory to write the report to. By default, this is a
directory called ``'results'`` in the current directory. If the
directory does not exist, it will be created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path-like, optional
To perform validation with ``votlint`` from the Java-based |STILTS|
VOTable parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first. | def make_validation_report(
urls=None,
destdir="astropy.io.votable.validator.results",
multiprocess=True,
stilts=None,
):
"""
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of str, optional
If provided, is a list of HTTP urls to download VOTable files
from. If not provided, a built-in set of ~22,000 urls
compiled by HEASARC will be used.
destdir : path-like, optional
The directory to write the report to. By default, this is a
directory called ``'results'`` in the current directory. If the
directory does not exist, it will be created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path-like, optional
To perform validation with ``votlint`` from the Java-based |STILTS|
VOTable parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first.
"""
from astropy.utils.console import ProgressBar, Spinner, color_print
if stilts is not None:
if not os.path.exists(stilts):
raise ValueError(f"{stilts} does not exist.")
destdir = os.path.expanduser(destdir)
destdir = os.path.abspath(destdir)
if urls is None:
with Spinner("Loading URLs", "green") as s:
urls = get_urls(destdir, s)
else:
urls = [url.encode() for url in urls if isinstance(url, str)]
color_print("Marking URLs", "green")
for url in ProgressBar(urls):
with result.Result(url, root=destdir) as r:
r["expected"] = type
args = [(url, destdir) for url in urls]
color_print("Downloading VO files", "green")
ProgressBar.map(download, args, multiprocess=multiprocess)
color_print("Validating VO files", "green")
ProgressBar.map(validate_vo, args, multiprocess=multiprocess)
if stilts is not None:
color_print("Validating with votlint", "green")
votlint_args = [(stilts, x, destdir) for x in urls]
ProgressBar.map(votlint_validate, votlint_args, multiprocess=multiprocess)
color_print("Generating HTML files", "green")
ProgressBar.map(write_html_result, args, multiprocess=multiprocess)
with Spinner("Grouping results", "green") as s:
subsets = result.get_result_subsets(urls, destdir, s)
color_print("Generating index", "green")
html.write_index(subsets, urls, destdir)
color_print("Generating subindices", "green")
subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
ProgressBar.map(write_subindex, subindex_args, multiprocess=multiprocess) |
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself. | def get_index(model, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
if isinstance(key, str):
if key in model.inputs:
index = model.inputs.index(key)
else:
raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.")
elif np.issubdtype(type(key), np.integer):
if 0 <= key < len(model.inputs):
index = key
else:
raise IndexError(
f"Integer key: {key} must be non-negative and < {len(model.inputs)}."
)
else:
raise ValueError(f"Key value: {key} must be string or integer.")
return index |
Get the input name corresponding to the input index. | def get_name(model, index: int):
"""Get the input name corresponding to the input index."""
return model.inputs[index] |
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``. | def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs) |
Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not). | def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not).
"""
if getattr(model, "name", None) == name:
return []
try:
return ["left"] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ["right"] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass |
Perform binary operation. Operands may be matching tuples of operands. | def binary_operation(binoperator, left, right):
"""
Perform binary operation. Operands may be matching tuples of operands.
"""
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1]) for item in zip(left, right))
return binoperator(left, right) |
Recursive function to collect operators used. | def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return |
Traverse a tree noting each node by a key.
The key indicates all the left/right choices necessary to reach that node.
Each key will reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree | def make_subtree_dict(tree, nodepath, tdict, leaflist):
"""Traverse a tree noting each node by a key.
The key indicates all the left/right choices necessary to reach that node.
Each key will reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree
"""
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, "isleaf"):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist)
make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist)
rightmostind = len(leaflist) - 1
tdict[nodepath] = (tree, leftmostind, rightmostind) |
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that one or more of the
model input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y) | def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that one or more of the
model input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
"""
model = CompoundModel("fix_inputs", modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(
modelinstance, bounding_boxes, selector_args
)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model |
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``. | def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = ModelBoundingBox.validate(
modelinstance, bounding_box, ignored=ignored, order=order
) |
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``. | def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
) |
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments where ``N`` is dimensions of the model (the number of
independent variable in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25) # doctest: +FLOAT_CMP
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... """Two dimensional Moffat function."""
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333 | def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments where ``N`` is dimensions of the model (the number of
independent variable in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25) # doctest: +FLOAT_CMP
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any)."
) |
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function | def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ["n_outputs"]
settable = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None
]
properties = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special
]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(
f"Parameter '{param.name}' cannot be a model property: {properties}."
)
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params |
Internal implementation `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`. | def _custom_model_wrapper(func, fit_deriv=None):
"""
Internal implementation `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable object"
)
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other callable object"
)
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params):
raise ModelDefinitionError(
"derivative function should accept same number of parameters as func."
)
params = {
param: Parameter(param, default=default) for param, default in params.items()
}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
members = {
"__module__": str(modname),
"__doc__": func.__doc__,
"n_inputs": len(inputs),
"n_outputs": special_params.pop("n_outputs", 1),
"evaluate": staticmethod(func),
"_settable_properties": settable_params,
}
if fit_deriv is not None:
members["fit_deriv"] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
cls._separable = len(inputs) == 1
return cls |
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes` | def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
if (coords is None) & (arr is None) & (bbox is None):
raise ValueError("If no bounding_box is set, coords or arr must be input.")
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError(
"number of array dimensions inconsistent with number of model inputs."
)
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError(
"coordinate length inconsistent with the number of model inputs."
)
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError("coordinate shape inconsistent with the array shape.")
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input"
" arr in one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr |
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
For example::
compound_model.inverse = hide_inverse(m1) + m2 + m3
This will insure that the defined inverse itself won't attempt to
build its own inverse, which would otherwise fail in this example
(e.g., m = m1 + m2 + m3 happens to raises an exception for this
reason.)
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later. | def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
For example::
compound_model.inverse = hide_inverse(m1) + m2 + m3
This will insure that the defined inverse itself won't attempt to
build its own inverse, which would otherwise fail in this example
(e.g., m = m1 + m2 + m3 happens to raises an exception for this
reason.)
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model |
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed. | def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters)
# are in the right unit system
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper |
Convert inputs to float arrays. | def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :]
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg |
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
use_min_max_bounds: bool
If the set parameter bounds for model will be enforced on each
parameter with bounds.
Default: True | def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
use_min_max_bounds: bool
If the set parameter bounds for model will be enforced on each
parameter with bounds.
Default: True
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]["slice"]
shape = param_metrics[name]["shape"]
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset : offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]["slice"]
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters() |
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied. | def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]["slice"]
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds |
Make sure model constraints are supported by the current fitter. | def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = "Optimizer cannot handle {0} constraints."
if any(model.fixed.values()) and "fixed" not in supported_constraints:
raise UnsupportedConstraintError(message.format("fixed parameter"))
if any(model.tied.values()) and "tied" not in supported_constraints:
raise UnsupportedConstraintError(message.format("tied parameter"))
if (
any(tuple(b) != (None, None) for b in model.bounds.values())
and "bounds" not in supported_constraints
):
raise UnsupportedConstraintError(message.format("bound parameter"))
if model.eqcons and "eqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("equality"))
if model.ineqcons and "ineqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("inequality")) |
Check that model and fitter are compatible and return a copy of the model. | def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn(
"Model is linear in parameters; consider using linear fitting methods.",
AstropyUserWarning,
)
elif len(model) != 1:
# for now only single data sets ca be fitted
raise ValueError("Non-linear fitters can only fit one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy |
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requirement
of it being merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
entry_points are objects which encapsulate importable objects and
are defined on the installation of a package.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_ | def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requirement
of it being merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
entry_points are objects which encapsulate importable objects and
are defined on the installation of a package.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(
AstropyUserWarning(
f"{type(e).__name__} error occurred in entry point {name}."
)
)
else:
if not isinstance(entry_point, type):
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to be a Class."
)
)
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name) # noqa: PYI056
else:
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"
)
) |
Make a ufunc model class name from the name of the ufunc. | def _make_class_name(name):
"""Make a ufunc model class name from the name of the ufunc."""
return name[0].upper() + name[1:] + "Ufunc" |
Define a Model from a Numpy ufunc name. | def ufunc_model(name):
"""Define a Model from a Numpy ufunc name."""
ufunc = getattr(np, name)
nin = ufunc.nin
nout = ufunc.nout
if nin == 1:
separable = True
def evaluate(self, x):
return self.func(x)
else:
separable = False
def evaluate(self, x, y):
return self.func(x, y)
klass_name = _make_class_name(name)
members = {
"n_inputs": nin,
"n_outputs": nout,
"func": ufunc,
"linear": False,
"fittable": False,
"_separable": separable,
"_is_dynamic": True,
"evaluate": evaluate,
}
klass = type(str(klass_name), (_NPUfuncModel,), members)
klass.__module__ = "astropy.modeling.math_functions"
return klass |
Convert a parameter to float or float array. | def _tofloat(value):
"""Convert a parameter to float or float array."""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
f"Parameter of {type(value)} could not be converted to float"
)
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
value = float(value)
elif isinstance(value, bool):
raise InputParameterError(
"Expected parameter to be of numerical type, not boolean"
)
else:
raise InputParameterError(
f"Don't know how to convert parameter of {type(value)} to float"
)
return value |
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities. | def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = f"{out} {param.unit!s}"
return out |
A separability test for the outputs of a transform.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
is_separable : ndarray
A boolean array with size ``transform.n_outputs`` where
each element indicates whether the output is independent
and the result of a separable transform.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([ True, True]...)
>>> is_separable(Shift(1) & Shift(2) | Rotation2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | Polynomial2D(1) & Polynomial2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([ True, True, True, True]...) | def is_separable(transform):
"""
A separability test for the outputs of a transform.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
is_separable : ndarray
A boolean array with size ``transform.n_outputs`` where
each element indicates whether the output is independent
and the result of a separable transform.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([ True, True]...)
>>> is_separable(Shift(1) & Shift(2) | Rotation2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([ True, True, True, True]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
is_separable = np.array([False] * transform.n_outputs).T
return is_separable
separable_matrix = _separable(transform)
is_separable = separable_matrix.sum(1)
is_separable = np.where(is_separable != 1, False, True)
return is_separable |
Compute the correlation between outputs and inputs.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
separable_matrix : ndarray
A boolean correlation matrix of shape (n_outputs, n_inputs).
Indicates the dependence of outputs on inputs. For completely
independent outputs, the diagonal elements are True and
off-diagonal elements are False.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([[ True, False], [False, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | Polynomial2D(1) & Polynomial2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([[ True, False], [False, True], [ True, False], [False, True]]...) | def separability_matrix(transform):
"""
Compute the correlation between outputs and inputs.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
separable_matrix : ndarray
A boolean correlation matrix of shape (n_outputs, n_inputs).
Indicates the dependence of outputs on inputs. For completely
independent outputs, the diagonal elements are True and
off-diagonal elements are False.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([[ True, False], [False, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([[ True, False], [False, True], [ True, False], [False, True]]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
return np.ones((transform.n_outputs, transform.n_inputs), dtype=np.bool_)
separable_matrix = _separable(transform)
separable_matrix = np.where(separable_matrix != 0, True, False)
return separable_matrix |
Compute the number of outputs of two models.
The two models are the left and right model to an operation in
the expression tree of a compound model.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`. | def _compute_n_outputs(left, right):
"""
Compute the number of outputs of two models.
The two models are the left and right model to an operation in
the expression tree of a compound model.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
"""
if isinstance(left, Model):
lnout = left.n_outputs
else:
lnout = left.shape[0]
if isinstance(right, Model):
rnout = right.n_outputs
else:
rnout = right.shape[0]
noutp = lnout + rnout
return noutp |
Function corresponding to one of the arithmetic operators
['+', '-'. '*', '/', '**'].
This always returns a nonseparable output.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation. | def _arith_oper(left, right):
"""
Function corresponding to one of the arithmetic operators
['+', '-'. '*', '/', '**'].
This always returns a nonseparable output.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
def _n_inputs_outputs(input):
if isinstance(input, Model):
# Models have the same number of inputs and outputs.
n_outputs, n_inputs = input.n_outputs, input.n_inputs
else:
n_outputs, n_inputs = input.shape
return n_inputs, n_outputs
left_inputs, left_outputs = _n_inputs_outputs(left)
right_inputs, right_outputs = _n_inputs_outputs(right)
if left_inputs != right_inputs or left_outputs != right_outputs:
raise ModelDefinitionError(
"Unsupported operands for arithmetic operator: left"
f" (n_inputs={left_inputs}, n_outputs={left_outputs}) and right"
f" (n_inputs={right_inputs}, n_outputs={right_outputs}); models must have"
" the same n_inputs and the same n_outputs for this operator."
)
result = np.ones((left_outputs, left_inputs))
return result |
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child. | def _coord_matrix(model, pos, noutp):
"""
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child.
"""
if isinstance(model, Mapping):
axes = []
for i in model.mapping:
axis = np.zeros((model.n_inputs,))
axis[i] = 1
axes.append(axis)
m = np.vstack(axes)
mat = np.zeros((noutp, model.n_inputs))
if pos == "left":
mat[: model.n_outputs, : model.n_inputs] = m
else:
mat[-model.n_outputs :, -model.n_inputs :] = m
return mat
if not model.separable:
# this does not work for more than 2 coordinates
mat = np.zeros((noutp, model.n_inputs))
if pos == "left":
mat[: model.n_outputs, : model.n_inputs] = 1
else:
mat[-model.n_outputs :, -model.n_inputs :] = 1
else:
mat = np.zeros((noutp, model.n_inputs))
for i in range(model.n_inputs):
mat[i, i] = 1
if pos == "right":
mat = np.roll(mat, (noutp - model.n_outputs))
return mat |
Function corresponding to '&' operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation. | def _cstack(left, right):
"""
Function corresponding to '&' operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
noutp = _compute_n_outputs(left, right)
if isinstance(left, Model):
cleft = _coord_matrix(left, "left", noutp)
else:
cleft = np.zeros((noutp, left.shape[1]))
cleft[: left.shape[0], : left.shape[1]] = left
if isinstance(right, Model):
cright = _coord_matrix(right, "right", noutp)
else:
cright = np.zeros((noutp, right.shape[1]))
cright[-right.shape[0] :, -right.shape[1] :] = right
return np.hstack([cleft, cright]) |
Function corresponding to "|" operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation. | def _cdot(left, right):
"""
Function corresponding to "|" operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
left, right = right, left
def _n_inputs_outputs(input, position):
"""
Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix.
"""
if isinstance(input, Model):
coords = _coord_matrix(input, position, input.n_outputs)
else:
coords = input
return coords
cleft = _n_inputs_outputs(left, "left")
cright = _n_inputs_outputs(right, "right")
try:
result = np.dot(cleft, cright)
except ValueError:
raise ModelDefinitionError(
'Models cannot be combined with the "|" operator; '
f"left coord_matrix is {cright}, right coord_matrix is {cleft}"
)
return result |
Calculate the separability of outputs.
Parameters
----------
transform : `astropy.modeling.Model`
A transform (usually a compound model).
Returns :
is_separable : ndarray of dtype np.bool
An array of shape (transform.n_outputs,) of boolean type
Each element represents the separablity of the corresponding output. | def _separable(transform):
"""
Calculate the separability of outputs.
Parameters
----------
transform : `astropy.modeling.Model`
A transform (usually a compound model).
Returns :
is_separable : ndarray of dtype np.bool
An array of shape (transform.n_outputs,) of boolean type
Each element represents the separablity of the corresponding output.
"""
if (
transform_matrix := transform._calculate_separability_matrix()
) is not NotImplemented:
return transform_matrix
elif isinstance(transform, CompoundModel):
sepleft = _separable(transform.left)
sepright = _separable(transform.right)
return _operators[transform.op](sepleft, sepright)
elif isinstance(transform, Model):
return _coord_matrix(transform, "left", transform.n_outputs) |
Least square statistic, with optional weights, in N-dimensions.
Parameters
----------
measured_vals : ndarray or sequence
Measured data values. Will be cast to array whose
shape must match the array-cast of the evaluated model.
updated_model : :class:`~astropy.modeling.Model` instance
Model with parameters set by the current iteration of the optimizer.
when evaluated on "x", must return array of shape "measured_vals"
weights : ndarray or None
Array of weights to apply to each residual.
*x : ndarray
Independent variables on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare_1d`
:func:`~astropy.modeling.statistic.leastsquare_2d`
:func:`~astropy.modeling.statistic.leastsquare_3d`
Notes
-----
Models in :mod:`~astropy.modeling` have broadcasting rules that try to
match inputs with outputs with Model shapes. Numpy arrays have flexible
broadcasting rules, so mismatched shapes can often be made compatible. To
ensure data matches the model we must perform shape comparison and leverage
the Numpy arithmetic functions. This can obfuscate arithmetic computation
overrides, like with Quantities. Implement a custom statistic for more
direct control. | def leastsquare(measured_vals, updated_model, weights, *x):
"""Least square statistic, with optional weights, in N-dimensions.
Parameters
----------
measured_vals : ndarray or sequence
Measured data values. Will be cast to array whose
shape must match the array-cast of the evaluated model.
updated_model : :class:`~astropy.modeling.Model` instance
Model with parameters set by the current iteration of the optimizer.
when evaluated on "x", must return array of shape "measured_vals"
weights : ndarray or None
Array of weights to apply to each residual.
*x : ndarray
Independent variables on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare_1d`
:func:`~astropy.modeling.statistic.leastsquare_2d`
:func:`~astropy.modeling.statistic.leastsquare_3d`
Notes
-----
Models in :mod:`~astropy.modeling` have broadcasting rules that try to
match inputs with outputs with Model shapes. Numpy arrays have flexible
broadcasting rules, so mismatched shapes can often be made compatible. To
ensure data matches the model we must perform shape comparison and leverage
the Numpy arithmetic functions. This can obfuscate arithmetic computation
overrides, like with Quantities. Implement a custom statistic for more
direct control.
"""
model_vals = updated_model(*x)
if np.shape(model_vals) != np.shape(measured_vals):
raise ValueError(
f"Shape mismatch between model ({np.shape(model_vals)}) "
f"and measured ({np.shape(measured_vals)})"
)
if weights is None:
weights = 1.0
return np.sum(np.square(weights * np.subtract(model_vals, measured_vals))) |
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 1D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare` | def leastsquare_1d(measured_vals, updated_model, weights, x):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 1D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2) |
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 2D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare` | def leastsquare_2d(measured_vals, updated_model, weights, x, y):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 2D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x, y)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2) |
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 3D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
z : ndarray
Independent variable "z" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare` | def leastsquare_3d(measured_vals, updated_model, weights, x, y, z):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 3D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
z : ndarray
Independent variable "z" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x, y, z)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2) |
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
This model has to be further initialized and when evaluated
returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'astropy.modeling.tabular.Tabular2D'>
Name: Tabular2D
N_inputs: 2
N_outputs: 1
>>> points = ([1, 2, 3], [1, 2, 3])
Setting fill_value to None, allows extrapolation.
>>> m = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> m(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.]) | def tabular_model(dim, name=None):
"""
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
This model has to be further initialized and when evaluated
returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'astropy.modeling.tabular.Tabular2D'>
Name: Tabular2D
N_inputs: 2
N_outputs: 1
>>> points = ([1, 2, 3], [1, 2, 3])
Setting fill_value to None, allows extrapolation.
>>> m = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> m(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.])
"""
if dim < 1:
raise ValueError("Lookup table must have at least one dimension.")
table = np.zeros([2] * dim)
members = {"lookup_table": table, "n_inputs": dim, "n_outputs": 1}
if dim == 1:
members["_separable"] = True
else:
members["_separable"] = False
if name is None:
model_id = _Tabular._id
_Tabular._id += 1
name = f"Tabular{model_id}"
model_class = type(str(name), (_Tabular,), members)
model_class.__module__ = "astropy.modeling.tabular"
return model_class |
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` in the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
given operator is applied element-wise to tuple outputs).
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,) | def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` in the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
given operator is applied element-wise to tuple outputs).
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: tuple(
oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params))
) |
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain | def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
if domain.shape != (2,) or window.shape != (2,):
raise ValueError('Expected "domain" and "window" to be a tuple of size 2.')
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx |
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken. | def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val |
Represents a multi-dimensional Numpy array flattened onto a single line. | def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
"""
r = np.array2string(array, separator=", ", suppress_small=True)
return " ".join(line.strip() for line in r.splitlines()) |
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
labels and '1' to the right-hand labels so there is no ambiguity). | def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
labels and '1' to the right-hand labels so there is no ambiguity).
"""
if set(left).intersection(right):
left = tuple(label + "0" for label in left)
right = tuple(label + "1" for label in right)
return left + right |
Calculates the half size of a box encapsulating a rotated 2D
ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
The ellipse semimajor axis.
b : float or `~astropy.units.Quantity`
The ellipse semiminor axis.
theta : float or `~astropy.units.Quantity` ['angle']
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or
a value in radians (as a float). The rotation angle increases
counterclockwise.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi / 4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show() | def ellipse_extent(a, b, theta):
"""
Calculates the half size of a box encapsulating a rotated 2D
ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
The ellipse semimajor axis.
b : float or `~astropy.units.Quantity`
The ellipse semiminor axis.
theta : float or `~astropy.units.Quantity` ['angle']
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or
a value in radians (as a float). The rotation angle increases
counterclockwise.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi / 4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
from .parameters import Parameter # prevent circular import
if isinstance(theta, Parameter):
if theta.quantity is None:
theta = theta.value
else:
theta = theta.quantity
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy], subok=True))
return np.abs([dx, dy]) |
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects | def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params |
Convert ``value`` to radian. | def _to_radian(value):
"""Convert ``value`` to radian."""
if isinstance(value, u.Quantity):
return value.to(u.rad)
return np.deg2rad(value) |
Convert value with ``raw_unit`` to ``orig_unit``. | def _to_orig_unit(value, raw_unit=None, orig_unit=None):
"""Convert value with ``raw_unit`` to ``orig_unit``."""
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
return np.rad2deg(value) |
Read the records of an IRAF database file into a python list.
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records | def get_records(fname):
"""
Read the records of an IRAF database file into a python list.
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
f = open(fname)
dtb = f.read()
f.close()
recs = dtb.split("begin")[1:]
records = [Record(r) for r in recs]
return records |
Read an IRAF database file.
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
the database file as a string | def get_database_string(fname):
"""
Read an IRAF database file.
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
the database file as a string
"""
f = open(fname)
dtb = f.read()
f.close()
return dtb |
Check that creating model sets with components whose _n_models are
different raise a value error | def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
MESSAGE = r"Both operands must have equal values for .*"
with pytest.raises(ValueError, match=MESSAGE):
expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1)) |
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters. | def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, CompoundModel)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float) |
Shift and Scale are two of the simplest models to test model composition
with. | def test_simple_two_model_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift(2) | Scale(3) # First shift then scale
assert isinstance(S1, CompoundModel)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
assert S1(1) == 9.0
S2 = Scale(2) | Shift(3) # First scale then shift
assert isinstance(S2, CompoundModel)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
assert S2(1) == 5.0
# Test with array inputs
assert_array_equal(S2([1, 2, 3]), [5.0, 7.0, 9.0]) |
A simple example consisting of two rotations. | def test_simple_two_model_compose_2d():
"""
A simple example consisting of two rotations.
"""
r1 = Rotation2D(45) | Rotation2D(45)
assert isinstance(r1, CompoundModel)
assert r1.n_inputs == 2
assert r1.n_outputs == 2
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = Rotation2D(90) | Rotation2D(90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
r3 = r1 | r1
assert_allclose(r3(0, 1), (0, -1), atol=1e-10) |
Test that CompoundModel.n_submodels properly returns the number
of components. | def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels == 3
g5 = g3 | g2
assert g5.n_submodels == 5
g7 = g5 / g2
assert g7.n_submodels == 7 |
Test that the expression strings from compound models are formatted
correctly. | def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
# For the purposes of this test it doesn't matter a great deal what
# model(s) are used in the expression, I don't think
G = Gaussian1D(1, 1, 1)
G2 = Gaussian2D(1, 2, 3, 4, 5, 6)
M = G + G
assert M._format_expression() == "[0] + [1]"
M = G + G + G
assert M._format_expression() == "[0] + [1] + [2]"
M = G + G * G
assert M._format_expression() == "[0] + [1] * [2]"
M = G * G + G
assert M._format_expression() == "[0] * [1] + [2]"
M = G + G * G + G
assert M._format_expression() == "[0] + [1] * [2] + [3]"
M = (G + G) * (G + G)
assert M._format_expression() == "([0] + [1]) * ([2] + [3])"
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == "[0] * [1] + [2] * [3]"
M = G**G
assert M._format_expression() == "[0] ** [1]"
M = G + G**G
assert M._format_expression() == "[0] + [1] ** [2]"
M = (G + G) ** G
assert M._format_expression() == "([0] + [1]) ** [2]"
M = G + G | G
assert M._format_expression() == "[0] + [1] | [2]"
M = G + (G | G)
assert M._format_expression() == "[0] + ([1] | [2])"
M = G & G | G2
assert M._format_expression() == "[0] & [1] | [2]"
M = G & (G | G)
assert M._format_expression() == "[0] & ([1] | [2])" |
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only. | def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
assert_allclose(t.inverse(*t(0, 1)), (0, 1)) |
Ensure inverses aren't supported in cases where it shouldn't be. | def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
MESSAGE = r"No analytical or user-supplied inverse transform .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.inverse |
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs. | def test_mapping_basic_permutations():
"""
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
rs = Rotation2D(90) | Mapping((1, 0))
x_prime, y_prime = rs(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
m = Rotation2D(90) & Scale(2)
x, y, z = m(1, 2, 3)
ms = m | Mapping((2, 0, 1))
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime)) |
Tests inverting a compound model that includes a `Mapping`. | def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
rs1 = Rotation2D(12.1) & Scale(13.2)
rs2 = Rotation2D(14.3) & Scale(15.4)
# Rotates 2 of the coordinates and scales the third--then rotates on a
# different axis and scales on the axis of rotation. No physical meaning
# here just a simple test
m = rs1 | Mapping([2, 0, 1]) | rs2
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08) |
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362 | def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0]) |
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly. | def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
MESSAGE = r"Unsupported operands for |:.*"
with pytest.raises(ModelDefinitionError, match=MESSAGE):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
MESSAGE = r"Both operands must match numbers of inputs and outputs"
with pytest.raises(ModelDefinitionError, match=MESSAGE):
Rotation2D(90) + Gaussian1D(1, 0, 0.1) |
Tests that polynomials are scaled when used in compound models.
Issue #3699 | def test_compound_with_polynomials_2d(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound) |
Test indexing on compound model instances. | def test_indexing_on_instance():
"""Test indexing on compound model instances."""
m = Gaussian1D(1, 0, 0.1) + Const1D(2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert m.param_names == ("amplitude_0", "mean_0", "stddev_0", "amplitude_1")
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name="g")
p = Polynomial1D(2, name="p")
m = g + p
assert m[0].name == "g"
assert m[1].name == "p"
assert m["g"].name == "g"
assert m["p"].name == "p"
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
MESSAGE = r"list index out of range"
with pytest.raises(IndexError, match=MESSAGE):
m[42]
MESSAGE = r"No component with name 'foobar' found"
with pytest.raises(IndexError, match=MESSAGE):
m["foobar"]
# Confirm index-by-name works with fix_inputs
g = Gaussian2D(1, 2, 3, 4, 5, name="g")
m = fix_inputs(g, {0: 1})
assert m["g"].name == "g"
# Test string slicing
A = Const1D(1.1, name="A")
B = Const1D(2.1, name="B")
C = Const1D(3.1, name="C")
M = A + B * C
assert_allclose(M["B":"C"](1), 6.510000000000001) |
Subsets and Splits