response | instruction
---|---
Test different supported formats for chunked reading. | def test_read_chunks_formats(masked):
"""
Test different supported formats for chunked reading.
"""
t1 = simple_table(size=102, cols=10, kinds="fS", masked=masked)
for i, name in enumerate(t1.colnames):
t1.rename_column(name, f"col{i + 1}")
# TODO: commented_header does not currently work due to the special-cased
# implementation of header parsing.
for format in "tab", "csv", "no_header", "rdb", "basic":
out = StringIO()
ascii.write(t1, out, format=format)
t_gen = ascii.read(
out.getvalue(),
format=format,
fast_reader={"chunk_size": 400, "chunk_generator": True},
)
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) > 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
# Now read the full table in chunks
t3 = ascii.read(out.getvalue(), format=format, fast_reader={"chunk_size": 400})
assert np.all(t1 == t3) |
Column changes type or size between chunks. This also tests the case with
no final newline. | def test_read_chunks_table_changes():
"""Column changes type or size between chunks. This also tests the case with
no final newline.
"""
col = ["a b c"] + ["1.12334 xyz a"] * 50 + ["abcdefg 555 abc"] * 50
table = "\n".join(col)
t1 = ascii.read(table, guess=False)
t2 = ascii.read(table, fast_reader={"chunk_size": 100})
# This also confirms that the dtypes are exactly the same, i.e.
# the string itemsizes are the same.
assert np.all(t1 == t2) |
Test that the pure-Python reader is used if the file contains non-ASCII
characters. | def test_read_non_ascii():
"""Test that the pure-Python reader is used if the file contains non-ASCII
characters.
"""
table = Table.read(["col1, col2", "\u2119, \u01b4", "1, 2"], format="csv")
assert np.all(table["col1"] == ["\u2119", "1"])
assert np.all(table["col2"] == ["\u01b4", "2"]) |
Test that fast_reader dictionary is preserved through guessing sequence. | def test_kwargs_dict_guess(enable):
"""Test that fast_reader dictionary is preserved through guessing sequence."""
# Fails for enable=(True, 'force') - #5578
ascii.read("a\tb\n 1\t2\n3\t 4.0", fast_reader={"enable": enable})
assert get_read_trace()[-1]["kwargs"]["reader_cls"] is (
ascii.Tab if (enable is False) else ascii.FastTab
)
for k in get_read_trace():
if not k.get("status", "Disabled").startswith("Disabled"):
assert k.get("kwargs").get("fast_reader").get("enable") is enable |
Test that duplicate column names are successfully de-duplicated for the
basic format. Skip the case of rdb=True and fast_reader='force' when selecting
include_names, since that fails and is tested below. | def test_deduplicate_names_basic(rdb, fast_reader):
"""Test that duplicate column names are successfully de-duplicated for the
basic format. Skip the case of rdb=True and fast_reader='force' when selecting
include_names, since that fails and is tested below.
"""
lines = _get_lines(rdb)
dat = ascii.read(lines, fast_reader=fast_reader)
assert dat.colnames == ["a", "a_2", "a_1", "a_3", "a_4"]
assert len(dat) == 2
dat = ascii.read(lines, fast_reader=fast_reader, include_names=["a", "a_2", "a_3"])
assert len(dat) == 2
assert dat.colnames == ["a", "a_2", "a_3"]
assert np.all(dat["a"] == [1, 10])
assert np.all(dat["a_2"] == [2, 20])
assert np.all(dat["a_3"] == [4, 40])
dat = ascii.read(
lines,
fast_reader=fast_reader,
names=["b1", "b2", "b3", "b4", "b5"],
include_names=["b1", "b2", "a_4", "b4"],
)
assert len(dat) == 2
assert dat.colnames == ["b1", "b2", "b4"]
assert np.all(dat["b1"] == [1, 10])
assert np.all(dat["b2"] == [2, 20])
assert np.all(dat["b4"] == [4, 40])
dat = ascii.read(
lines,
fast_reader=fast_reader,
names=["b1", "b2", "b3", "b4", "b5"],
exclude_names=["b3", "b5", "a_3", "a_4"],
)
assert len(dat) == 2
assert dat.colnames == ["b1", "b2", "b4"]
assert np.all(dat["b1"] == [1, 10])
assert np.all(dat["b2"] == [2, 20])
assert np.all(dat["b4"] == [4, 40]) |
Test that selecting column names via `include_names` works for the RDB format
with fast reader. This is testing the fix for a bug identified in #9939. | def test_include_names_rdb_fast():
"""Test that selecting column names via `include_names` works for the RDB format
with fast reader. This is testing the fix for a bug identified in #9939.
"""
lines = _get_lines(True)
lines[0] = "a\ta_2\ta_1\ta_3\ta_4"
dat = ascii.read(lines, fast_reader="force", include_names=["a", "a_2", "a_3"])
assert len(dat) == 2
assert dat["a"].dtype.kind == "i"
assert dat["a_2"].dtype.kind == "i" |
Test that on selecting column names via `include_names` in the RDB format with
different types and duplicate column names type assignment is correctly preserved. | def test_deduplicate_names_with_types(fast_reader):
"""Test that on selecting column names via `include_names` in the RDB format with
different types and duplicate column names type assignment is correctly preserved.
"""
lines = _get_lines(True)
lines[1] = "N\tN\tN\tS\tS"
dat = ascii.read(lines, fast_reader=fast_reader, include_names=["a", "a_2", "a_3"])
assert len(dat) == 2
assert dat["a_2"].dtype.kind == "i"
assert dat["a_3"].dtype.kind == "U"
dat = ascii.read(
lines,
fast_reader=fast_reader,
names=["b1", "b2", "b3", "b4", "b5"],
include_names=["a1", "a_2", "b1", "b2", "b4"],
)
assert len(dat) == 2
assert dat.colnames == ["b1", "b2", "b4"]
assert dat["b2"].dtype.kind == "i"
assert dat["b4"].dtype.kind == "U" |
Test exceptions for invalid (duplicate or `None`) names specified via argument. | def test_set_invalid_names(rdb, fast_reader):
"""
Test exceptions for invalid (duplicate or `None`) names specified via argument.
"""
lines = _get_lines(rdb)
if rdb:
fmt = "rdb"
else:
fmt = "basic"
with pytest.raises(ValueError) as err:
ascii.read(
lines,
fast_reader=fast_reader,
format=fmt,
guess=rdb,
names=["b1", "b2", "b1", "b4", "b5"],
)
assert "Duplicate column names" in str(err.value)
with pytest.raises(TypeError) as err:
ascii.read(
lines,
fast_reader=fast_reader,
format=fmt,
guess=rdb,
names=["b1", "b2", "b1", None, None],
)
assert "Cannot have None for column name" in str(err.value) |
Test converters where the column name is specified with
a wildcard. | def test_read_converters_wildcard():
"""Test converters where the column name is specified with
a wildcard.
"""
converters = {"F*": [ascii.convert_numpy(np.float32)]}
t = ascii.read(["Fabc Iabc", "1 2"], converters=converters)
assert np.issubdtype(t["Fabc"].dtype, np.float32)
assert not np.issubdtype(t["Iabc"].dtype, np.float32) |
Test providing io.ascii read converters as type or dtypes instead of
convert_numpy(type) outputs | def test_read_converters_simplified():
"""Test providing io.ascii read converters as type or dtypes instead of
convert_numpy(type) outputs"""
t = Table()
t["a"] = [1, 2]
t["b"] = [3.5, 4]
t["c"] = ["True", "False"]
t["d"] = ["true", "false"] # Looks kindof like boolean but actually a string
t["e"] = [5, 6]
out = StringIO()
t.write(out, format="ascii.basic")
converters = {"a": str, "e": np.float32}
t2 = Table.read(out.getvalue(), format="ascii.basic", converters=converters)
assert t2.pformat(show_dtype=True) == [
" a b c d e ",
"str1 float64 str5 str5 float32",
"---- ------- ----- ----- -------",
" 1 3.5 True true 5.0",
" 2 4.0 False false 6.0",
]
converters = {"a": float, "*": [np.int64, float, bool, str]}
t2 = Table.read(out.getvalue(), format="ascii.basic", converters=converters)
assert t2.pformat_all(show_dtype=True) == [
" a b c d e ",
"float64 float64 bool str5 int64",
"------- ------- ----- ----- -----",
" 1.0 3.5 True true 5",
" 2.0 4.0 False false 6",
]
# Test failures
for converters in (
{"*": [int, 1, bool, str]}, # bad converter type
# Tuple converter where 2nd element is not a subclass of NoType
{"a": [(int, int)]},
# Tuple converter with 3 elements not 2
{"a": [(int, int, int)]},
):
with pytest.raises(ValueError, match="Error: invalid format for converters"):
t2 = Table.read(
out.getvalue(), format="ascii.basic", converters=converters, guess=False
) |
Normal SimpleRST Table | def test_read_normal():
"""Normal SimpleRST Table"""
table = """
# comment (with blank line above)
======= =========
Col1 Col2
======= =========
1.2 "hello"
2.4 's worlds
======= =========
"""
reader = ascii.get_reader(reader_cls=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2"])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds") |
Normal SimpleRST Table with provided column names | def test_read_normal_names():
"""Normal SimpleRST Table with provided column names"""
table = """
# comment (with blank line above)
======= =========
Col1 Col2
======= =========
1.2 "hello"
2.4 's worlds
======= =========
"""
reader = ascii.get_reader(reader_cls=ascii.RST, names=("name1", "name2"))
dat = reader.read(table)
assert_equal(dat.colnames, ["name1", "name2"])
assert_almost_equal(dat[1][0], 2.4) |
Normal SimpleRST Table with provided column names | def test_read_normal_names_include():
"""Normal SimpleRST Table with provided column names"""
table = """
# comment (with blank line above)
======= ========== ======
Col1 Col2 Col3
======= ========== ======
1.2 "hello" 3
2.4 's worlds 7
======= ========== ======
"""
reader = ascii.get_reader(
reader_cls=ascii.RST,
names=("name1", "name2", "name3"),
include_names=("name1", "name3"),
)
dat = reader.read(table)
assert_equal(dat.colnames, ["name1", "name3"])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], 3) |
Nice, typical SimpleRST table with col name excluded | def test_read_normal_exclude():
"""Nice, typical SimpleRST table with col name excluded"""
table = """
======= ==========
Col1 Col2
======= ==========
1.2 "hello"
2.4 's worlds
======= ==========
"""
reader = ascii.get_reader(reader_cls=ascii.RST, exclude_names=("Col1",))
dat = reader.read(table)
assert_equal(dat.colnames, ["Col2"])
assert_equal(dat[1][0], "'s worlds") |
The right hand column should be allowed to overflow | def test_read_unbounded_right_column():
"""The right hand column should be allowed to overflow"""
table = """
# comment (with blank line above)
===== ===== ====
Col1 Col2 Col3
===== ===== ====
1.2 2 Hello
2.4 4 Worlds
===== ===== ====
"""
reader = ascii.get_reader(reader_cls=ascii.RST)
dat = reader.read(table)
assert_equal(dat[0][2], "Hello")
assert_equal(dat[1][2], "Worlds") |
The right hand column should be allowed to overflow | def test_read_unbounded_right_column_header():
"""The right hand column should be allowed to overflow"""
table = """
# comment (with blank line above)
===== ===== ====
Col1 Col2 Col3Long
===== ===== ====
1.2 2 Hello
2.4 4 Worlds
===== ===== ====
"""
reader = ascii.get_reader(reader_cls=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames[-1], "Col3Long") |
We should be able to read right indented tables correctly | def test_read_right_indented_table():
"""We should be able to read right indented tables correctly"""
table = """
# comment (with blank line above)
==== ==== ====
Col1 Col2 Col3
==== ==== ====
3 3.4 foo
1 4.5 bar
==== ==== ====
"""
reader = ascii.get_reader(reader_cls=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
assert_equal(dat[0][2], "foo")
assert_equal(dat[1][0], 1) |
Trailing spaces in the row definition column shouldn't matter | def test_trailing_spaces_in_row_definition():
"""Trailing spaces in the row definition column shouldn't matter"""
table = (
"\n"
"# comment (with blank line above)\n"
" ==== ==== ==== \n"
" Col1 Col2 Col3\n"
" ==== ==== ==== \n"
" 3 3.4 foo\n"
" 1 4.5 bar\n"
" ==== ==== ==== \n"
)
# make sure no one accidentally deletes the trailing whitespaces in the
# table.
assert len(table) == 151
reader = ascii.get_reader(reader_cls=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
assert_equal(dat[0][2], "foo")
assert_equal(dat[1][0], 1) |
Write a table as a normal SimpleRST Table | def test_write_normal():
"""Write a table as a normal SimpleRST Table"""
out = StringIO()
ascii.write(dat, out, format="rst")
assert_equal_splitlines(
out.getvalue(),
"""\
==== ========= ==== ====
Col1 Col2 Col3 Col4
==== ========= ==== ====
1.2 "hello" 1 a
2.4 's worlds 2 2
==== ========= ==== ====
""",
) |
Round-trip a table with header_rows specified | def test_rst_with_header_rows():
"""Round-trip a table with header_rows specified"""
lines = [
"======= ======== ====",
" wave response ints",
" nm ct ",
"float64 float32 int8",
"======= ======== ====",
" 350.0 1.0 1",
" 950.0 2.0 2",
"======= ======== ====",
]
tbl = QTable.read(lines, format="ascii.rst", header_rows=["name", "unit", "dtype"])
assert tbl["wave"].unit == u.nm
assert tbl["response"].unit == u.ct
assert tbl["wave"].dtype == np.float64
assert tbl["response"].dtype == np.float32
assert tbl["ints"].dtype == np.int8
out = StringIO()
tbl.write(out, format="ascii.rst", header_rows=["name", "unit", "dtype"])
assert out.getvalue().splitlines() == lines |
Test for https://github.com/astropy/astropy/issues/15989 | def test_ipac_read_long_columns():
"""Test for https://github.com/astropy/astropy/issues/15989"""
test_data = """\
| oid|expid|cadence|
| long| l| int|
90000000000000001 123 1
"""
dat = ascii.read(test_data, format="ipac")
# assert oid, as a long column, is int64
oid = dat["oid"]
assert oid[0] == 90000000000000001
assert oid.dtype.kind == "i"
assert oid.dtype.itemsize == 8
# expid is declared as a long column,
# the type needs to be int64, even though all
# the values are within int32 range
expid = dat["expid"]
assert expid[0] == 123
assert expid.dtype.kind == "i"
assert expid.dtype.itemsize == 8 |
Test code in BaseOutputter._convert_vals to handle Column.dtype
attribute. See discussion in #11895. | def test_col_dtype_in_custom_class():
"""Test code in BaseOutputter._convert_vals to handle Column.dtype
attribute. See discussion in #11895."""
dtypes = [np.float32, np.int8, np.int16]
class TestDtypeHeader(ascii.BasicHeader):
def get_cols(self, lines):
super().get_cols(lines)
for col, dtype in zip(self.cols, dtypes):
col.dtype = dtype
class TestDtype(ascii.Basic):
"""
Basic table Data reader_cls with data type alternating float32, int8
"""
header_class = TestDtypeHeader
txt = """
a b c
1 2 3
"""
reader = ascii.get_reader(TestDtype)
t = reader.read(txt)
for col, dtype in zip(t.itercols(), dtypes):
assert col.dtype.type is dtype |
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory. | def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path)) |
see discussion in #2255 | def test_write_fill_masked_different(fast_writer):
"""see discussion in #2255"""
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data["a"].mask = [True, False]
data["c"].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer) |
Write an IPAC table that contains no data. | def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(reader_cls=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer) |
Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised | def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(reader_cls=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["blah"] = "extra"
out = StringIO()
with pytest.warns(AstropyWarning, match=r".*were not written.*") as warn:
data.write(out, format="ascii.ipac")
assert len(warn) == 1 |
Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised | def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(reader_cls=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["keywords"]["blah"] = "invalid"
out = StringIO()
with pytest.warns(AstropyWarning, match=r".*has been skipped.*") as warn:
data.write(out, format="ascii.ipac")
assert len(warn) == 1 |
Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued | def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(reader_cls=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["keywords"]["blah"] = {"value": "invalid"}
out = StringIO()
data.write(out, format="ascii.ipac") |
Write comments in output originally read by io.ascii. | def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read("#c1\n # c2\t\na,b,c\n# c3\n1,2,3")
out = StringIO()
ascii.write(data, out, format="basic", fast_writer=fast_writer)
expected = ["# c1", "# c2", "# c3", "a b c", "1 2 3"]
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format="commented_header", fast_writer=fast_writer)
expected = ["# a b c", "# c1", "# c2", "# c3", "1 2 3"]
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format="basic", comment=False, fast_writer=fast_writer)
expected = ["a b c", "1 2 3"]
assert out.getvalue().splitlines() == expected |
Check different formats for a column. | def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read("#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33")
out = StringIO()
expected = ["# c1", "# c2", "# c3", "a b c", "1.1 2.22 3.33"]
data["a"].format = fmt
ascii.write(data, out, format="basic", fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected |
Names should be stripped of whitespace by default. | def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(" A", "B ", " C "))
out = StringIO()
ascii.write(data, out, format="csv", fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == "A,B,C" |
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units. | def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table(
[
table.Column(name="date", data=["a", "b"]),
table.Column(name="NUV exp.time", data=[1, 2]),
]
)
latexdict = copy.deepcopy(ascii.latexdicts["AA"])
latexdict["units"] = {"NUV exp.time": "s"}
out = StringIO()
tablehead = Template(
r"\tablehead{\colhead{date} & \colhead{NUV exp.time}\\ \colhead{$u1} & \colhead{$u2}}"
)
expected = [
r"\begin{table}{cc}",
tablehead.substitute(u1=" ", u2="s"),
r"\startdata",
r"a & 1 \\",
"b & 2",
r"\enddata",
r"\end{table}",
"",
]
ascii.write(t, out, format="aastex", latexdict=latexdict)
assert out.getvalue() == os.linesep.join(expected)
# use unit attribute instead
t["NUV exp.time"].unit = u.s
t["date"].unit = u.yr
out = StringIO()
ascii.write(t, out, format="aastex", latexdict=ascii.latexdicts["AA"])
expected[1] = tablehead.substitute(u1=r"$\mathrm{yr}$", u2=r"$\mathrm{s}$")
assert out.getvalue() == os.linesep.join(expected) |
Test the fix for #3562 with confusing exception using comment=False
for the commented_header writer. | def test_commented_header_comments(fast_writer):
"""
Test the fix for #3562 with confusing exception using comment=False
for the commented_header writer.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(
t, out, format="commented_header", comment=False, fast_writer=fast_writer
)
assert "for the commented_header writer you must supply a string" in str(err.value) |
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3. | def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([["Hello", "World"]], dtype=["S10"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0", "Hello", "World"] |
Test for #4508. | def test_names_with_formats(
names, include_names, exclude_names, formats, issues_warning
):
"""Test for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(
t,
out,
names=names,
include_names=include_names,
exclude_names=exclude_names,
formats=formats,
)
if issues_warning:
assert len(warn) == 1 |
Test the fix for #4508. | def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, formats=formats)
if issues_warning:
assert len(warn) == 1 |
Test writing empty string fields: they should be quoted for the default
space delimiter but not for a comma delimiter. | def test_write_quoted_empty_field(fast_writer):
"""
Test writing empty string fields: they should be quoted for the default
space delimiter but not for a comma delimiter.
"""
t = table.Table([["Hello", ""], ["", ""]], dtype=["S10", "S10"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0 col1", 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=",")
assert out.getvalue().splitlines() == ["col0,col1", "Hello,", ","] |
Test writing empty table #8275. | def test_write_empty_table(fast_writer):
"""Test writing empty table #8275."""
t = table.Table([[]], dtype=["S2"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0"] |
Test overwrite argument for various ASCII writers | def test_write_overwrite_ascii(
format, fast_writer, tmp_path, home_is_tmpdir, path_format
):
"""Test overwrite argument for various ASCII writers"""
true_filename = tmp_path / "table-tmp.dat"
if path_format == "plain":
filename = true_filename
elif path_format == "tilde-str":
filename = os.path.join("~", "table-tmp.dat")
else:
filename = pathlib.Path("~", "table-tmp.dat")
with open(true_filename, "w"):
# create empty file
pass
t = table.Table([["Hello", ""], ["", ""]], dtype=["S10", "S10"])
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format=format, fast_writer=fast_writer)
t.write(filename, overwrite=True, format=format, fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(true_filename, "w") as fp:
t.write(fp, overwrite=False, format=format, fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format, fast_writer=fast_writer)
if "tilde" in path_format:
# Ensure no files have been accidentally written to a literal tilde path
assert not os.path.exists(filename) |
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result. | def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, "_io_registry_can_write", True):
return
# Skip tests for fixed_width or HTML without bs4
if (fmt_name == "html" and not HAS_BS4) or fmt_name == "fixed_width":
return
if "qdp" in fmt_name:
# QDP tables are for numeric values only
t = simple_table(masked=True, kinds=["f", "i"])
else:
t = simple_table(masked=True)
out = StringIO()
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, skip test
return
# No-header formats need to be told the column names
kwargs = {"names": t.colnames} if "no_header" in fmt_name else {}
if "qdp" in fmt_name:
kwargs.update({"table_id": 0, "names": t.colnames})
t2 = ascii.read(
out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs
)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2) |
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written. | def test_write_csv_with_comments(fast_writer):
"""
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written.
"""
out = StringIO()
t = table.Table([[1, 2], [3, 4]], names=["a", "b"])
t.meta["comments"] = ["hello"]
ascii.write(t, out, format="csv", fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["a,b", "1,3", "2,4"]
out = StringIO()
ascii.write(t, out, format="csv", fast_writer=fast_writer, comment="#")
assert out.getvalue().splitlines() == ["#hello", "a,b", "1,3", "2,4"] |
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified. | def test_write_formatted_mixin(fast_writer):
"""
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified.
"""
out = StringIO()
t = table.QTable([[1, 2], [1, 2] * u.m], names=["a", "b"])
ascii.write(t, out, fast_writer=fast_writer, formats={"a": "%02d", "b": "%.2f"})
assert out.getvalue().splitlines() == ["a b", "01 1.00", "02 2.00"] |
Test that trying to write a multidim column fails in every format except
ECSV. | def test_multidim_column_error(fmt_name_class):
"""
Test that trying to write a multidim column fails in every format except
ECSV.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, "_io_registry_can_write", True):
return
# Skip tests for ecsv or HTML without bs4. See the comment in latex.py
# Latex class where max_ndim = None is defined regarding latex and aastex.
if (fmt_name == "html" and not HAS_BS4) or fmt_name in ("ecsv", "latex", "aastex"):
return
out = StringIO()
t = table.Table()
t["a"] = np.arange(16).reshape(2, 2, 2, 2)
t["b"] = [1, 2]
fast = fmt_name in ascii.core.FAST_CLASSES
with pytest.raises(ValueError, match=r"column\(s\) with dimension"):
ascii.write(t, out, format=fmt_name, fast_writer=fast) |
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.) | def test_write_as_columns(fast_writer):
"""
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.)
"""
# Use masked in case that makes it more difficult.
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data["a"].mask = [True, False]
data["c"].mask = [False, True]
data = list(data.columns.values())
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer) |
Converts a string to an int if possible, otherwise to a float. If the
string represents neither an int nor a float, a ValueError is raised. | def _int_or_float(s):
"""
Converts a string to an int if possible, otherwise to a float. If the
string represents neither an int nor a float, a ValueError is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e)) |
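A quick usage sketch of the helper above; the expected results (shown as comments) are inferred from the function body, not taken from library documentation.
_int_or_float("42")     # -> 42 (int)
_int_or_float("4.5")    # -> 4.5 (float, since int("4.5") fails)
_int_or_float(3.25)     # -> 3.25 (floats pass straight through)
_int_or_float("abc")    # raises ValueError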
Converts a card value to its appropriate string representation as
defined by the FITS format. | def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
# string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return "" |
Format a floating number to make sure it is at most 20 characters. | def _format_float(value):
"""Format a floating number to make sure it is at most 20 characters."""
value_str = str(value).replace("e", "E")
# Limit the value string to at most 20 characters.
if (str_len := len(value_str)) > 20:
idx = value_str.find("E")
if idx < 0:
# No scientific notation, truncate decimal places
value_str = value_str[:20]
else:
# Scientific notation, truncate significand (mantissa)
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str |
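A short worked example of the truncation rule above; the outputs are inferred from the code (long mantissas lose digits, the exponent is preserved so the string fits in 20 characters):
_format_float(1.0)                     # -> '1.0'
_format_float(1.0e-100)                # -> '1E-100'
_format_float(3.141592653589793e-100)  # -> '3.1415926535897E-100' (exactly 20 characters)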
Pad blank space to the input string to be multiple of 80. | def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen) |
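For example, assuming `Card.length` is the standard 80, the helper pads any input up to the next multiple of 80:
len(_pad("SIMPLE  =                    T"))   # -> 80
len(_pad("x" * 80))                           # -> 80 (already a full card, returned unchanged)
len(_pad("x" * 85))                           # -> 160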
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field. | def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
# try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx |
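The lookup rules described in the docstring play out like this; results are inferred from the implementation above and assume the module's usual `_is_int` helper is in scope:
names = ["abc", "ABC", "XYZ"]
_get_index(names, 1)      # -> 1 (integer keys are positional)
_get_index(names, "ABC")  # -> 1 (exact match wins over case-insensitive matching)
_get_index(names, "xyz")  # -> 2 (unique case-insensitive match)
_get_index(names, "Abc")  # raises KeyError: ambiguous between 'abc' and 'ABC'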
Unwrap the X format column into a Boolean array.
Parameters
----------
input
input ``Uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits | def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
input ``Uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype="uint8")
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8]) |
Wrap the X format column Boolean array into an ``UInt8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
output ``Uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits | def _wrapx(input, output, repeat):
"""
Wrap the X format column Boolean array into an ``UInt8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
output ``Uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i]) |
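A minimal round-trip sketch of the two helpers above, packing one row of 8 bits into a single byte (with repeat a multiple of 8 there are no unused bits to shift); the packed value is inferred from the bit-shifting logic:
import numpy as np
bits = np.array([[1, 0, 1, 1, 0, 0, 1, 0]], dtype=bool)  # shape (1, repeat=8)
packed = np.zeros((1, 1), dtype="uint8")                  # shape (1, nbytes=1)
_wrapx(bits, packed, 8)
# packed[0, 0] == 0b10110010 == 178: the first bit ends up in the most significant position
unpacked = np.zeros((1, 8), dtype=bool)
_unwrapx(packed, unpacked, 8)
assert np.array_equal(unpacked, bits)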
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array | def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == "S":
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == "S":
rowval = " " * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == "S":
data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
nelem = data_output[idx].shape
descr_output[idx, 0] = np.prod(nelem)
descr_output[idx, 1] = _offset
_offset += descr_output[idx, 0] * _nbytes
return data_output |
Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple. | def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
# TODO: Maybe catch this error use a default type (bytes, maybe?) for
# unrecognized column types. As long as we can determine the correct
# byte width somehow..
raise VerifyError(f"Format {tform!r} is not recognized.")
if repeat == "":
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option) |
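Some representative parses of binary-table TFORM values; the expected tuples are inferred from the regex groups above, with an empty option field for these simple formats:
_parse_tformat("10A")  # -> (10, 'A', '')  ten-character string field
_parse_tformat("J")    # -> (1, 'J', '')   single 32-bit integer
_parse_tformat("2D")   # -> (2, 'D', '')   two 64-bit floats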
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D'). | def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f"Format {tform!r} is not recognized.")
# Be flexible on case
format = match.group("format")
if format is None:
# Floating point format
format = match.group("formatf").upper()
width = match.group("widthf")
precision = match.group("precision")
if width is None or precision is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group("width")
if width is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = (
"Format {!r} is not valid--field width and decimal precision "
"must be integers."
)
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError(
f"Format {tform!r} not valid--field width must be a positive integeter."
)
if precision >= width:
raise VerifyError(
f"Format {tform!r} not valid--the number of decimal digits "
f"must be less than the format's total width {width}."
)
return format, width, precision |
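Typical parses of ASCII-table TFORM values; the expected results are inferred from the branches above (precision is only meaningful for floating-point formats):
_parse_ascii_tformat("A10")   # -> ('A', 10, 0)
_parse_ascii_tformat("I5")    # -> ('I', 5, 0)
_parse_ascii_tformat("F8.3")  # -> ('F', 8, 3)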
Parse the ``TDIM`` value into a tuple (may return an empty tuple if
the ``TDIM`` value is empty or invalid). | def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
the ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group("dims")
return tuple(int(d.strip()) for d in dims.split(","))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return () |
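For instance (results inferred from the code above; note the dimensions come back reversed, i.e. in C order):
_parse_tdim("(4,3)")      # -> (3, 4)
_parse_tdim("")           # -> ()
_parse_tdim("not-a-dim")  # -> ()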
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER. | def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value)) |
Compares two numpy recformats using the ordering given by FORMATORDER. | def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == "S" and f2[0] == "S":
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2)) |
Convert FITS format spec to record format spec. | def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == "A":
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'S7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == "A" and option != "":
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ""
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == "X":
output_format = _FormatX(repeat)
elif dtype == "P":
output_format = _FormatP.from_tform(format)
elif dtype == "Q":
output_format = _FormatQ.from_tform(format)
elif dtype == "F":
output_format = "f8"
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format |
Convert record format spec to FITS format spec. | def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == "U" or (
dtype.subdtype is not None and dtype.subdtype[0].char == "U"
):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
# which is what matters for FITS column formats
# Use dtype.base and dtype.subdtype --dtype for multi-dimensional items
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype="i8").prod()
if nel > 1:
repeat = nel
if kind == "S":
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + "A"
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ""
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format |
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two-character Numpy format codes
that have been traditionally used by Astropy. | def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two-character Numpy format codes
that have been traditionally used by Astropy.
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ("U", "S"):
recformat = kind = "S"
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype |
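Example conversions, with results inferred from the code above; note that both byte and unicode string dtypes collapse to kind 'S':
_dtype_to_recformat("float32")  # -> ('f4', 'f', dtype('float32'))
_dtype_to_recformat(">i8")      # -> ('i8', 'i', dtype('>i8'))
_dtype_to_recformat("U5")       # -> ('S', 'S', dtype('<U5'))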
Convert FITS format spec to record format spec. Do the opposite if
reverse=True. | def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format) |
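Round-trip examples between FITS TFORM codes and numpy record formats; the expected values assume the standard FITS2NUMPY/NUMPY2FITS mappings (e.g. 'J' <-> 'i4', 'E' <-> 'f4', 'D' <-> 'f8'):
_convert_format("J")                  # -> 'i4'
_convert_format("10A")                # -> 'S10'
_convert_format("2E")                 # -> '2f4'
_convert_format("f8", reverse=True)   # -> 'D'
_convert_format("S5", reverse=True)   # -> '5A'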
Convert ASCII table format spec to record format spec. | def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == "S":
return "A" + str(itemsize)
elif NUMPY2FITS.get(recformat) == "L":
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return "A1"
elif kind == "i":
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS["I"][0])
return "I" + str(width)
elif kind == "f":
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = "D"
else:
format = "E"
width = ".".join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'S', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
# For integers, if the width <= 4 we can safely use 16-bit ints for all
# values, if width >= 10 we may need to accommodate 64-bit ints.
# values [for the non-standard J format code just always force 64-bit]
if format == "I":
if width <= 4:
recformat = "i2"
elif width > 9:
recformat = "i8"
elif format == "A":
recformat += str(width)
return recformat |
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn | def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential |
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string. | def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f"Format {format_type} is not recognized.") |
Convert a Python format string to a TDISP FITS compliant format string. Not
all formats convert; these will cause a warning and return None.
Parameters
----------
format_string : str
Python format string to be translated into a TDISPn keyword value.
logical_dtype : bool
True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
tdisp_string: str
The Python format string translated into a TDISPn keyword string. | def python_to_tdisp(format_string, logical_dtype=False):
"""
Convert a Python format string to a TDISP FITS compliant format string. Not
all formats convert; these will cause a warning and return None.
Parameters
----------
format_string : str
Python format string to be translated into a TDISPn keyword value.
logical_dtype : bool
True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
tdisp_string: str
The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (ASCII_DEFAULT_WIDTHS[key][0] - ASCII_DEFAULT_WIDTHS[key][1])
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
f"Format {format_string} cannot be mapped to the accepted TDISPn "
"keyword values. Format will not be moved into TDISPn keyword.",
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision |
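A few representative translations; the expected outputs are inferred from the mapping and parsing logic above:
python_to_tdisp("{:8.3f}")  # -> 'F8.3'
python_to_tdisp("%10d")     # -> 'I10'
python_to_tdisp("%5.1e")    # -> 'E5.1'
python_to_tdisp(">5")       # -> 'A5'
python_to_tdisp("{}")       # -> None (nothing to translate)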
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file. | def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
return filepath.lower().endswith(
(".fits", ".fits.gz", ".fit", ".fit.gz", ".fts", ".fts.gz")
)
return isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)) |
Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate). | def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
# If available read in __serialized_columns__ meta info which is stored
# in FITS COMMENTS between two sentinels.
try:
i0 = tbl.meta["comments"].index("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
i1 = tbl.meta["comments"].index("--END-ASTROPY-SERIALIZED-COLUMNS--")
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
continuation_line = False
lines = []
for line in tbl.meta["comments"][i0 + 1 : i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta["comments"][i0 : i1 + 1]
if not tbl.meta["comments"]:
del tbl.meta["comments"]
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta["__serialized_columns__"] = info["meta"]["__serialized_columns__"]
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info["datatype"]:
for attr in ["description", "meta"]:
if attr in col:
setattr(tbl[col["name"]].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl |
Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
When set to `True` then ``mask_invalid`` is set to `False` since the
masking would cause loading the full data array.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above). | def read_table_fits(
input,
hdu=None,
astropy_native=False,
memmap=False,
character_as_bytes=True,
unit_parse_strict="warn",
mask_invalid=True,
):
"""
Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
When set to `True`, ``mask_invalid`` is set to `False` since the
masking would require loading the full data array.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above).
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = {}
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn(
"hdu= was not specified but multiple tables"
" are present, reading in first available"
f" table (hdu={first(tables)})",
AstropyUserWarning,
)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError(f"No table found in hdu={hdu}")
elif len(tables) == 1:
if hdu is not None:
msg = None
try:
hdi = input.index_of(hdu)
except KeyError:
msg = f"Specified hdu={hdu} not found"
else:
if hdi >= len(input):
msg = f"Specified hdu={hdu} not found"
elif hdi not in tables:
msg = f"No table found in specified hdu={hdu}"
if msg is not None:
warnings.warn(
f"{msg}, reading in first available table "
f"(hdu={first(tables)}) instead. This will"
" result in an error in future versions!",
AstropyDeprecationWarning,
)
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
if memmap:
# using memmap is not compatible with masking invalid value by
# default so we deactivate the masking
mask_invalid = False
hdulist = fits_open(input, character_as_bytes=character_as_bytes, memmap=memmap)
try:
return read_table_fits(
hdulist,
hdu=hdu,
astropy_native=astropy_native,
unit_parse_strict=unit_parse_strict,
mask_invalid=mask_invalid,
)
finally:
hdulist.close()
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
# Check if column is masked. Here, we make a guess based on the
# presence of FITS mask values. For integer columns, this is simply
# the null header, for float and complex, the presence of NaN, and for
# string, empty strings.
# Since Multi-element columns with dtypes such as '2f8' have a subdtype,
# we should look up the type of column on that.
# Also propagate TNULL (for ints) or the FITS default null value for
# floats and strings to the column's fill_value to ensure round trips
# preserve null values.
masked = mask = False
fill_value = None
coltype = col.dtype.subdtype[0].type if col.dtype.subdtype else col.dtype.type
if col.null is not None:
mask = data[col.name] == col.null
# Return a MaskedColumn even if no elements are masked so
# we roundtrip better.
masked = True
fill_value = col.null
elif mask_invalid and issubclass(coltype, np.inexact):
mask = np.isnan(data[col.name])
fill_value = np.nan
elif mask_invalid and issubclass(coltype, np.character):
mask = col.array == b""
fill_value = b""
if masked or np.any(mask):
column = MaskedColumn(
data=data[col.name],
name=col.name,
mask=mask,
copy=False,
fill_value=fill_value,
)
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(
col.unit, format="fits", parse_strict=unit_parse_strict
)
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ["COMMENT", "HISTORY"]:
# Convert to io.ascii format
if key == "COMMENT":
key = "comments"
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t |
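# Usage sketch (illustrative only; the filename and EXTNAME below are hypothetical):
# passing an already-open HDUList lets ``hdu=`` select the extension by name or
# index without re-opening the file.
from astropy.io import fits
hdul = fits.open("events.fits")
tbl = read_table_fits(hdul, hdu="EVENTS", unit_parse_strict="silent")
hdul.close()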
Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding. | def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(
any(
getattr(col.info, attr, None) not in (None, {})
for attr in ("description", "meta")
)
for col in tbl.itercols()
)
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
# Time-subclass columns and leaves them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as("fits"):
encode_tbl = serialize.represent_mixins_as_columns(tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Copy the meta dict if it was not copied by represent_mixins_as_columns.
# We will modify .meta['comments'] below and we do not want to see these
# comments in the input table.
if encode_tbl is tbl:
meta_copy = deepcopy(tbl.meta)
encode_tbl = Table(tbl.columns, meta=meta_copy, copy=False)
# Get the YAML serialization of information describing the table columns.
# This is re-using ECSV code that combines the existing table.meta with
# the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = "__serialized_columns__"
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if "comments" not in encode_tbl.meta:
encode_tbl.meta["comments"] = []
encode_tbl.meta["comments"].append("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
for line in meta_yaml_lines:
if len(line) == 0:
lines = [""]
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
lines = [line[i0:i1] + "\\" for i0, i1 in pairwise(idxs)]
lines[-1] = lines[-1][:-1]
encode_tbl.meta["comments"].extend(lines)
encode_tbl.meta["comments"].append("--END-ASTROPY-SERIALIZED-COLUMNS--")
return encode_tbl |
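# Minimal sketch of what _encode_mixins produces (illustrative; the column name
# is arbitrary): a Quantity column is a mixin, so the returned table holds only
# plain Columns plus YAML lines between the --BEGIN/--END-ASTROPY-SERIALIZED-COLUMNS--
# markers in meta["comments"].
import astropy.units as u
from astropy.table import QTable
t = QTable({"flux": [1.0, 2.0] * u.Jy})
enc = _encode_mixins(t)
assert "--BEGIN-ASTROPY-SERIALIZED-COLUMNS--" in enc.meta["comments"]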
Write a Table object to a FITS file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
Whether to append the table to an existing file | def write_table_fits(input, output, overwrite=False, append=False):
"""
Write a Table object to a FITS file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
Whether to append the table to an existing file
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
elif not append:
raise OSError(NOT_OVERWRITING_MSG.format(output))
if append:
# verify=False stops it reading and checking the existing file.
fits_append(output, table_hdu.data, table_hdu.header, verify=False)
else:
table_hdu.writeto(output) |
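# Usage sketch (hypothetical filename): with append=True an existing file is
# extended with a new table HDU instead of being overwritten.
from astropy.table import Table
t = Table({"a": [1, 2, 3]})
write_table_fits(t, "catalog.fits", overwrite=True)
write_table_fits(t, "catalog.fits", append=True)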
Get the header from an HDU of a FITS file.
Parameters
----------
filename : path-like or file-like
File to get header from. If an opened file object, its mode
must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for HDU specification. See the
`getdata` documentation for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object | def getheader(filename, *args, **kwargs):
"""
Get the header from an HDU of a FITS file.
Parameters
----------
filename : path-like or file-like
File to get header from. If an opened file object, its mode
must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for HDU specification. See the
`getdata` documentation for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header |
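# Usage sketch (filenames and extension names are illustrative):
hdr = getheader("image.fits")            # primary header
hdr = getheader("image.fits", 1)         # first extension header
hdr = getheader("image.fits", "SCI", 2)  # EXTNAME='SCI' and EXTVER=2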
Get the data from an HDU of a FITS file (and optionally the
header).
Parameters
----------
filename : path-like or file-like
File to get data from. If opened, mode must be one of the
following: rb, rb+, or ab+.
ext
The rest of the arguments are for HDU specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary HDU::
getdata('in.fits')
.. note::
Exclusive to ``getdata``: if ``ext`` is not specified
and primary header contains no data, ``getdata`` attempts
to retrieve data from first extension HDU.
By HDU number::
getdata('in.fits', 0) # the primary HDU
getdata('in.fits', 2) # the second extension HDU
getdata('in.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
Note that ``EXTNAME`` values are not case sensitive.
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : ndarray or `~numpy.recarray` or `~astropy.io.fits.Group`
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
Raises
------
IndexError
If no data is found in searched HDUs. | def getdata(filename, *args, header=None, lower=None, upper=None, view=None, **kwargs):
"""
Get the data from an HDU of a FITS file (and optionally the
header).
Parameters
----------
filename : path-like or file-like
File to get data from. If opened, mode must be one of the
following: rb, rb+, or ab+.
ext
The rest of the arguments are for HDU specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary HDU::
getdata('in.fits')
.. note::
Exclusive to ``getdata``: if ``ext`` is not specified
and primary header contains no data, ``getdata`` attempts
to retrieve data from first extension HDU.
By HDU number::
getdata('in.fits', 0) # the primary HDU
getdata('in.fits', 2) # the second extension HDU
getdata('in.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
Note that ``EXTNAME`` values are not case sensitive.
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : ndarray or `~numpy.recarray` or `~astropy.io.fits.Group`
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
Raises
------
IndexError
If no data is found in searched HDUs.
"""
mode, closed = _get_file_mode(filename)
ext = kwargs.get("ext")
extname = kwargs.get("extname")
extver = kwargs.get("extver")
ext_given = not (
len(args) == 0 and ext is None and extname is None and extver is None
)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None:
if ext_given:
raise IndexError(f"No data in HDU #{extidx}.")
# fallback to the first extension HDU
if len(hdulist) == 1:
raise IndexError("No data in Primary HDU and no extension HDU found.")
hdu = hdulist[1]
data = hdu.data
if data is None:
raise IndexError("No data in either Primary or first extension HDUs.")
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller("lower")
elif upper:
trans = operator.methodcaller("upper")
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == "":
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data |
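# Usage sketch (hypothetical file): header=True returns a (data, header) tuple,
# and lower=True forces lower-case field names for table data.
data, hdr = getdata("table.fits", 1, header=True, lower=True)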
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object (if opened, mode must be
one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float | def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object (if opened, mode must be
one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword] |
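# Usage sketch (keyword and filename are illustrative): values come from the
# unmodified header because do_not_scale_image_data is enabled automatically.
exptime = getval("image.fits", "EXPTIME")       # primary header
naxis1 = getval("image.fits", "NAXIS1", ext=1)  # first extension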
Set a keyword's value from a header in a FITS file.
If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both are
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header. | def setval(
filename,
keyword,
*args,
value=None,
comment=None,
before=None,
after=None,
savecomment=False,
**kwargs,
):
"""
Set a keyword's value from a header in a FITS file.
If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both are
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed) |
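# Usage sketch (values are illustrative): add or update a keyword with a
# comment and control its position relative to an existing card.
setval("image.fits", "OBSERVER", value="E. Hubble",
       comment="Name of observer", after="DATE-OBS")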
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header. | def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed) |
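# Usage sketch (hypothetical file): remove every HISTORY card from the
# primary header.
delval("image.fits", "HISTORY", ext=0)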
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened in a writable binary
mode such as 'wb' or 'ab+'.
data : array or `~numpy.recarray` or `~astropy.io.fits.Group`
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See
:ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
Notes
-----
gzip, zip and bzip2 compression algorithms are natively supported.
Compression mode is determined from the filename extension
('.gz', '.zip' or '.bz2' respectively). It is also possible to pass a
compressed file object, e.g. `gzip.GzipFile`. | def writeto(
filename,
data,
header=None,
output_verify="exception",
overwrite=False,
checksum=False,
):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened in a writable binary
mode such as 'wb' or 'ab+'.
data : array or `~numpy.recarray` or `~astropy.io.fits.Group`
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See
:ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
Notes
-----
gzip, zip and bzip2 compression algorithms are natively supported.
Compression mode is determined from the filename extension
('.gz', '.zip' or '.bz2' respectively). It is also possible to pass a
compressed file object, e.g. `gzip.GzipFile`.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(
filename, overwrite=overwrite, output_verify=output_verify, checksum=checksum
) |
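# Usage sketch (hypothetical filename): write a small image with one extra
# header keyword, overwriting any existing file.
import numpy as np
from astropy.io import fits
hdr = fits.Header()
hdr["TELESCOP"] = "demo"
writeto("new.fits", np.zeros((10, 10)), header=hdr, overwrite=True)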
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU. | def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .column import python_to_tdisp
from .connect import REMOVE_KEYWORDS, is_column_keyword
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names}"
)
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be Nan and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (
coldtype.subdtype[0].type if coldtype.subdtype else coldtype.type
)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ""
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(
tarray.filled(), header=hdr, character_as_bytes=character_as_bytes
)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
# Be careful that we do not set null for columns that were not masked!
int_formats = ("B", "I", "J", "K")
if (
col.format in int_formats or col.format.p_format in int_formats
) and hasattr(table[col.name], "mask"):
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(
tarray, header=hdr, character_as_bytes=character_as_bytes
)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(
table[col.name].info.format, logical_dtype=logical
)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format="fits")
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units."
)
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
"native FITS format "
)
if any(
"SerializedColumn" in item and "name: " + col.name in item
for item in table.meta.get("comments", [])
):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading."
)
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading."
)
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format="fits", parse_strict="warn")
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop("__coordinate_columns__", {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in "coord_type", "coord_unit":
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get("time_ref_pos", None)
if trpos is not None:
col.time_ref_pos = trpos
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
"with a FITS reserved keyword",
AstropyUserWarning,
)
continue
# Convert to FITS format
if key == "comments":
key = "comment"
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
return table_hdu |
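# Usage sketch (column name and EXTNAME are illustrative): convert a table with
# a unit-bearing column to a BinTableHDU that can be appended to an HDUList.
import astropy.units as u
from astropy.table import QTable
t = QTable({"wave": [500.0, 600.0] * u.nm})
hdu = table_to_hdu(t)
hdu.header["EXTNAME"] = "SPECTRUM"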
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, :class:`~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for appending.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
**kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used. | def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, :class:`~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for appending.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
**kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used.
"""
if isinstance(filename, path_like):
filename = os.path.expanduser(filename)
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
# The input file or file-like object either doesn't exist or is
# empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode="append", **kwargs)
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode="append")
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close() |
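# Usage sketch (hypothetical file): skipping verification is faster when
# appending to a large file that is already known to be valid FITS.
import numpy as np
append("stack.fits", np.ones((5, 5)), verify=False)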
Update the specified HDU with the input data/header.
Parameters
----------
filename : path-like or file-like
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, `~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for updating.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
HDU specification(s). Header and HDU specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension HDU
update(file, dat, hdr, 3) # update the 3rd extension HDU
update(file, dat, 'sci', 2) # update the 2nd extension HDU named 'sci'
update(file, dat, 3, header=hdr) # update the 3rd extension HDU
update(file, dat, header=hdr, ext=5) # update the 5th extension HDU
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`. | def update(filename, data, *args, **kwargs):
"""
Update the specified HDU with the input data/header.
Parameters
----------
filename : path-like or file-like
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, `~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for updating.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
HDU specification(s). Header and HDU specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension HDU
update(file, dat, hdr, 3) # update the 3rd extension HDU
update(file, dat, 'sci', 2) # update the 2nd extension HDU named 'sci'
update(file, dat, 3, header=hdr) # update the 3rd extension HDU
update(file, dat, header=hdr, ext=5) # update the 5th extension HDU
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop("header", header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, "update", *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed) |
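# Usage sketch (hypothetical file): replace the data of the first extension
# HDU in place, keeping the rest of the file untouched.
import numpy as np
update("image.fits", np.zeros((20, 20)), ext=1)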
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each HDU.
Parameters
----------
filename : path-like or file-like
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default. | def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each HDU.
Parameters
----------
filename : path-like or file-like
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default="readonly")
# Set the default value for the ignore_missing_end parameter
if "ignore_missing_end" not in kwargs:
kwargs["ignore_missing_end"] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret |
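# Usage sketch (hypothetical file): output=False suppresses printing and
# returns the per-HDU summary as a list of tuples instead.
rows = info("image.fits", output=False)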
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow quick print out
of a FITS difference report and will write to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly. | def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow quick print out
of a FITS difference report and will write to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {
key: kwargs.pop(key) for key in ["ext", "extname", "extver"] if key in kwargs
}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
# Use handy _getext to interpret any ext keywords, but
# will need to close a if fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError(
"Extension specification with HDUList objects not implemented."
)
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report()) |
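# Usage sketch (hypothetical files): compare only the first extension of two
# files and ignore a keyword that is expected to differ between them.
printdiff("runA.fits", "runB.fits", ext=1, ignore_keywords=["DATE"])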
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : path-like or file-like
Input fits file.
datafile : path-like or file-like, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `tabledump` function is to allow editing in a
standard text editor of the table data and parameters. The
`tableload` function can be used to reassemble the table from the
three ASCII files. | def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1, overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : path-like or file-like
Input fits file.
datafile : path-like or file-like, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `tabledump` function is to allow editing in a
standard text editor of the table data and parameters. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default="readonly")
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + "_" + repr(ext) + ".txt"
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close() |
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
The primary use for the `tableload` function is to allow the input of
ASCII data that was edited in a standard text editor of the table
data and parameters. The tabledump function can be used to create the
initial ASCII files. | def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
The primary use for the `tableload` function is to allow the input of
ASCII data that was edited in a standard text editor of the table
data and parameters. The tabledump function can be used to create the
initial ASCII files.
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True) |
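# Usage sketch (hypothetical filenames): dump a table HDU to editable ASCII
# files and rebuild a BinTableHDU from them afterwards.
tabledump("catalog.fits", "catalog_data.txt", "catalog_cd.txt", ext=1)
hdu = tableload("catalog_data.txt", "catalog_cd.txt")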
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities. | def _getext(filename, mode, *args, ext=None, extname=None, extver=None, **kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
err_msg = "Redundant/conflicting extension argument(s): {}".format(
{"args": args, "ext": ext, "extname": extname, "extver": extver}
)
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError("Too many positional arguments.")
if ext is not None and not (
_is_int(ext)
or (
isinstance(ext, tuple)
and len(ext) == 2
and isinstance(ext[0], str)
and _is_int(ext[1])
)
):
raise ValueError(
"The ext keyword must be either an extension number "
"(zero-indexed) or a (extname, extver) tuple."
)
if extname is not None and not isinstance(extname, str):
raise ValueError("The extname argument must be a string.")
if extver is not None and not _is_int(extver):
raise ValueError("The extver argument must be an integer.")
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError("extver alone cannot specify an extension.")
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext |
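A sketch of the calling conventions this helper accepts (the file name is hypothetical); each call returns the opened HDUList plus a normalized extension specifier:
hdul, ext = _getext("example.fits", "readonly", 1)                # ext -> 1
hdul2, ext2 = _getext("example.fits", "readonly", "SCI", 2)       # ext2 -> ('SCI', 2)
hdul3, ext3 = _getext("example.fits", "readonly", extname="SCI")  # ext3 -> ('SCI', 1)
for h in (hdul, hdul2, hdul3):
    h.close()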
Allow the file object to already be opened in any of the valid modes
and leave the file in the same state (opened or closed) as when
the function was called. | def _get_file_mode(filename, default="readonly"):
"""
Allow the file object to already be opened in any of the valid modes
and leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
f"File mode of the input file object ({fmode!r}) cannot be used to "
"read/write FITS files."
)
return mode, closed |
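A minimal sketch, assuming the module-level FILE_MODES mapping translates the Python mode 'rb' into the FITS mode 'readonly' (the file name is hypothetical):
with open("example.fits", "rb") as f:          # an already-open file object
    mode, closed = _get_file_mode(f)           # expected: mode == 'readonly', closed is False
mode, closed = _get_file_mode("example.fits")  # a plain path keeps the default mode, closed is True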
Write a diff between two header keyword values or comments to the specified
file-like object. | def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ""
else:
dup = f"[{idx + 1}]"
fileobj.write(
textwrap.indent(
f" Keyword {keyword:8}{dup} has different {attr}:\n", ind * " "
)
)
report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1) |
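For illustration, the diffs mapping pairs a keyword with a list of (a, b) tuples, one per occurrence of that keyword in the compared headers; a small sketch:
import io
buf = io.StringIO()
diffs = {"EXPTIME": [(100.0, 200.0)]}  # one differing occurrence of EXPTIME
report_diff_keyword_attr(buf, "values", diffs, "EXPTIME", ind=1)
print(buf.getvalue())  # " Keyword EXPTIME  has different values:" followed by the value diff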
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects. | def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field |
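A small sketch of the compatibility behaviour (string fields come back as chararray views, so trailing blanks are stripped on element access):
import numpy as np
rec = np.rec.array([(b"abc  ", 1)], dtype=[("name", "S8"), ("idx", "i4")])
name_col = _get_recarray_field(rec, "name")
print(name_col[0])                         # b'abc' -- trailing whitespace stripped by the chararray view
idx_col = _get_recarray_field(rec, "idx")  # non-string fields are returned unchanged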
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
If any strings are non-ASCII, a UnicodeArrayEncodeError is raised; this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded. | def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
If any strings are non-ASCII, a UnicodeArrayEncodeError is raised; this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1] |
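A minimal sketch of the expected behaviour (each 4-byte Unicode character collapses to one ASCII byte, so a 'U3' input yields an 'S3' output):
import numpy as np
uni = np.array(["abc", "de"], dtype="U3")
enc = _ascii_encode(uni)
# enc.dtype is dtype('S3') and enc holds b'abc', b'de'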
Returns True if any fields in a structured array have Unicode dtype. | def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes) |
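For example, a structured array with any Unicode ('U') field qualifies, while an all-bytes layout does not:
import numpy as np
arr = np.zeros(2, dtype=[("name", "U8"), ("flux", "f8")])
print(_has_unicode_fields(arr))                                                  # True
print(_has_unicode_fields(np.zeros(2, dtype=[("name", "S8"), ("flux", "f8")])))  # False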
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword. | def is_time_column_keyword(keyword):
"""
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword.
"""
return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None |
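For example, indexed column keywords such as TCTYPn, TCUNIn, or TRPOSn should match, while global keywords like TIMESYS should not:
print(is_time_column_keyword("TCTYP2"))   # expected: True
print(is_time_column_keyword("TRPOS15"))  # expected: True
print(is_time_column_keyword("TIMESYS"))  # expected: False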
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information. | def _verify_global_info(global_info):
"""
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information.
"""
# Translate FITS deprecated scale into astropy scale, or else just convert
# to lower case for further checks.
global_info["scale"] = FITS_DEPRECATED_SCALES.get(
global_info["TIMESYS"], global_info["TIMESYS"].lower()
)
# Verify global time scale
if global_info["scale"] not in Time.SCALES:
# 'GPS' and 'LOCAL' are FITS recognized time scale values
# but are not supported by astropy.
if global_info["scale"] == "gps":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "GPS". In Astropy, "GPS" is a time from epoch format '
"which runs synchronously with TAI; GPS is approximately 19 s "
"ahead of TAI. Hence, this format will be used.",
AstropyUserWarning,
)
# Assume that the values are in GPS format
global_info["scale"] = "tai"
global_info["format"] = "gps"
if global_info["scale"] == "local":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "LOCAL". However, the standard states that "LOCAL" should be '
"tied to one of the existing scales because it is intrinsically "
"unreliable and/or ill-defined. Astropy will thus use the default "
'global time scale "UTC" instead of "LOCAL".',
AstropyUserWarning,
)
# Default scale 'UTC'
global_info["scale"] = "utc"
global_info["format"] = None
else:
raise AssertionError(
"Global time scale (TIMESYS) should have a FITS recognized "
"time scale value (got {!r}). The FITS standard states that "
"the use of local time scales should be restricted to alternate "
"coordinates.".format(global_info["TIMESYS"])
)
else:
# Scale is already set
global_info["format"] = None
# Check if geocentric global location is specified
obs_geo = [global_info[attr] for attr in OBSGEO_XYZ if attr in global_info]
# Location full specification is (X, Y, Z)
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geocentric(*obs_geo, unit=u.m)
else:
# Check if geodetic global location is specified (since geocentric failed)
# First warn the user if geocentric location is partially specified
if obs_geo:
warnings.warn(
f"The geocentric observatory location {obs_geo} is not completely "
"specified (X, Y, Z) and will be ignored.",
AstropyUserWarning,
)
# Check geodetic location
obs_geo = [global_info[attr] for attr in OBSGEO_LBH if attr in global_info]
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geodetic(*obs_geo)
else:
# Since both geocentric and geodetic locations are not specified,
# location will be None.
# Warn the user if geodetic location is partially specified
if obs_geo:
warnings.warn(
f"The geodetic observatory location {obs_geo} is not completely "
"specified (lon, lat, alt) and will be ignored.",
AstropyUserWarning,
)
global_info["location"] = None
# Get global time reference
# Keywords are listed in order of precedence, as stated by the standard
for key, format_ in (("MJDREF", "mjd"), ("JDREF", "jd"), ("DATEREF", "fits")):
if key in global_info:
global_info["ref_time"] = {"val": global_info[key], "format": format_}
break
else:
# If none of the three keywords is present, MJDREF = 0.0 must be assumed
global_info["ref_time"] = {"val": 0, "format": "mjd"} |
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information. | def _verify_column_info(column_info, global_info):
"""
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information.
"""
scale = column_info.get("TCTYP", None)
unit = column_info.get("TCUNI", None)
location = column_info.get("TRPOS", None)
if scale is not None:
# Non-linear coordinate types have "4-3" form and are not time coordinates
if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]):
return False
elif scale.lower() in Time.SCALES:
column_info["scale"] = scale.lower()
column_info["format"] = None
elif scale in FITS_DEPRECATED_SCALES.keys():
column_info["scale"] = FITS_DEPRECATED_SCALES[scale]
column_info["format"] = None
# TCTYPn (scale) = 'TIME' indicates that the column scale is
# controlled by the global scale.
elif scale == "TIME":
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
elif scale == "GPS":
warnings.warn(
(
f'Table column "{column_info}" has a FITS recognized time scale '
'value "GPS". In Astropy, "GPS" is a time from epoch format which '
"runs synchronously with TAI; GPS runs ahead of TAI approximately "
"by 19 s. Hence, this format will be used."
),
AstropyUserWarning,
)
column_info["scale"] = "tai"
column_info["format"] = "gps"
elif scale == "LOCAL":
warnings.warn(
(
f'Table column "{column_info}" has a FITS recognized time scale '
'value "LOCAL". However, the standard states that "LOCAL" should '
"be tied to one of the existing scales because it is intrinsically "
"unreliable and/or ill-defined. Astropy will thus use the global "
"time scale (TIMESYS) as the default."
),
AstropyUserWarning,
)
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
else:
# Coordinate type is either an unrecognized local time scale
# or a linear coordinate type
return False
# If TCUNIn is a time unit or TRPOSn is specified, the column is a time
# coordinate. This has to be tested since TCTYP (scale) is not specified.
elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
# None of the conditions for time coordinate columns is satisfied
else:
return False
# Check if column-specific reference position TRPOSn is specified
if location is not None:
# Observatory position (location) needs to be specified only
# for 'TOPOCENTER'.
if location == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column reference position "TRPOSn" value is "TOPOCENTER". '
"However, the observatory position is not properly specified. "
"The FITS standard does not support this and hence reference "
"position will be ignored.",
AstropyUserWarning,
)
else:
column_info["location"] = None
# Warn user about ignoring global reference position when TRPOSn is
# not specified
elif global_info["TREFPOS"] == "TOPOCENTER":
if global_info["location"] is not None:
warnings.warn(
'Time column reference position "TRPOSn" is not specified. The '
'default value for it is "TOPOCENTER", and the observatory position '
"has been specified. However, for supporting column-specific location, "
"reference position will be ignored for this column.",
AstropyUserWarning,
)
column_info["location"] = None
else:
column_info["location"] = None
# Get reference time
column_info["ref_time"] = global_info["ref_time"]
return True |
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
This is only applicable to the special-case where a column has the
name 'TIME' and a time unit. | def _get_info_if_time_column(col, global_info):
"""
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
This is only applicable to the special-case where a column has the
name 'TIME' and a time unit.
"""
# Column with TTYPEn = 'TIME' and lacking any TC*n or time
# specific keywords will be controlled by the global keywords.
if col.info.name.upper() == "TIME" and col.info.unit in FITS_TIME_UNIT:
column_info = {
"scale": global_info["scale"],
"format": global_info["format"],
"ref_time": global_info["ref_time"],
"location": None,
}
if global_info["TREFPOS"] == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
f'Time column "{col.info.name}" reference position will be ignored '
"due to unspecified observatory position.",
AstropyUserWarning,
)
return column_info
return None |
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information. | def _convert_global_time(table, global_info):
"""
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information.
"""
# Read in Global Informational keywords as Time
for key in global_info:
# FITS uses a subset of ISO-8601 for DATE-xxx
if key not in table.meta:
try:
table.meta[key] = _convert_time_key(global_info, key)
except ValueError:
pass |
Convert a time metadata key to a Time object.
Parameters
----------
global_info : dict
Global time reference frame information.
key : str
Time key.
Returns
-------
astropy.time.Time
Raises
------
ValueError
If key is not a valid global time keyword. | def _convert_time_key(global_info, key):
"""
Convert a time metadata key to a Time object.
Parameters
----------
global_info : dict
Global time reference frame information.
key : str
Time key.
Returns
-------
astropy.time.Time
Raises
------
ValueError
If key is not a valid global time keyword.
"""
value = global_info[key]
if key.startswith("DATE"):
scale = "utc" if key == "DATE" else global_info["scale"]
precision = len(value.split(".")[-1]) if "." in value else 0
return Time(value, format="fits", scale=scale, precision=precision)
# MJD-xxx in MJD according to TIMESYS
elif key.startswith("MJD-"):
return Time(value, format="mjd", scale=global_info["scale"])
else:
raise ValueError("Key is not a valid global time keyword") |
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information. | def _convert_time_column(col, column_info):
"""
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information.
"""
# The code might fail while attempting to read FITS files not written by astropy.
try:
# ISO-8601 is the only string representation of time in FITS
if col.info.dtype.kind in ["S", "U"]:
# [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters
# from index 20 to the end of string represents the precision
precision = max(int(col.info.dtype.str[2:]) - 20, 0)
return Time(
col,
format="fits",
scale=column_info["scale"],
precision=precision,
location=column_info["location"],
)
if column_info["format"] == "gps":
return Time(col, format="gps", location=column_info["location"])
# If reference value is 0 for JD or MJD, the column values can be
# directly converted to Time, as they are absolute (relative
# to a globally accepted zero point).
if column_info["ref_time"]["val"] == 0 and column_info["ref_time"][
"format"
] in ["jd", "mjd"]:
# (jd1, jd2) where jd = jd1 + jd2
if col.shape[-1] == 2 and col.ndim > 1:
return Time(
col[..., 0],
col[..., 1],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
else:
return Time(
col,
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Reference time
ref_time = Time(
column_info["ref_time"]["val"],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Elapsed time since reference time
if col.shape[-1] == 2 and col.ndim > 1:
delta_time = TimeDelta(col[..., 0], col[..., 1])
else:
delta_time = TimeDelta(col)
return ref_time + delta_time
except Exception as err:
warnings.warn(
f'The exception "{err}" was encountered while trying to convert the time '
f'column "{col.info.name}" to Astropy Time.',
AstropyUserWarning,
)
return col |
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed) | def fits_to_time(hdr, table):
"""
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed)
"""
# Set defaults for global time scale, reference, etc.
global_info = {"TIMESYS": "UTC", "TREFPOS": "TOPOCENTER"}
# Set default dictionary for time columns
time_columns = defaultdict(OrderedDict)
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = hdr.copy(strip=True)
# Scan the header for global and column-specific time keywords
for key, value, comment in hdr.cards:
if key in TIME_KEYWORDS:
global_info[key] = value
hcopy.remove(key)
elif is_time_column_keyword(key):
base, idx = re.match(r"([A-Z]+)([0-9]+)", key).groups()
time_columns[int(idx)][base] = value
hcopy.remove(key)
elif value in OBSGEO_XYZ and re.match("TTYPE[0-9]+", key):
global_info[value] = table[value]
# Verify and get the global time reference frame information
_verify_global_info(global_info)
_convert_global_time(table, global_info)
# Columns with column-specific time (coordinate) keywords
if time_columns:
for idx, column_info in time_columns.items():
# Check if the column is time coordinate (not spatial)
if _verify_column_info(column_info, global_info):
colname = table.colnames[idx - 1]
# Convert to Time
table[colname] = _convert_time_column(table[colname], column_info)
# Check for special-cases of time coordinate columns
for idx, colname in enumerate(table.colnames):
if (idx + 1) not in time_columns:
column_info = _get_info_if_time_column(table[colname], global_info)
if column_info:
table[colname] = _convert_time_column(table[colname], column_info)
return hcopy |
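A usage sketch (the file name is hypothetical); the table gains Time columns in place and the returned header has the time metadata removed:
from astropy.io import fits
from astropy.table import Table
with fits.open("events.fits") as hdul:
    tab = Table(hdul[1].data)
    remaining_hdr = fits_to_time(hdul[1].header, tab)
# Columns described by TCTYPn/TCUNIn/TRPOSn keywords are now astropy Time columns.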
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords | def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header(
[
Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()
]
)
# Store coordinate column-specific metadata
newtable.meta["__coordinate_columns__"] = defaultdict(OrderedDict)
coord_meta = newtable.meta["__coordinate_columns__"]
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method["fits"] == "formatted_value":
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
# Note: easier would be np.stack([col.jd1, col.jd2], axis=-1), but that
# fails for np.ma.MaskedArray, as it returns just the data, ignoring the mask.
jd12 = np.empty_like(col.jd1, shape=col.jd1.shape + (2,))
jd12[..., 0] = col.jd1
jd12[..., 1] = col.jd2
newtable.replace_column(col.info.name, col_cls(jd12, unit="d"))
# Time column-specific override keywords
coord_meta[col.info.name]["coord_type"] = col.scale.upper()
coord_meta[col.info.name]["coord_unit"] = "d"
# Time column reference position
if col.location is None:
coord_meta[col.info.name]["time_ref_pos"] = None
if location is not None:
warnings.warn(
(
f'Time Column "{col.info.name}" has no specified location, '
"but global Time Position is present, which will be the "
"default for this column in FITS specification."
),
AstropyUserWarning,
)
else:
coord_meta[col.info.name]["time_ref_pos"] = "TOPOCENTER"
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
(
f'Earth Location "TOPOCENTER" for Time Column "{col.info.name}" '
f'is incompatible with scale "{col.scale.upper()}".'
),
AstropyUserWarning,
)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ("x", "y", "z"):
newtable.add_column(
Column(getattr(location, dim).to_value(u.m)),
name=f"OBSGEO-{dim.upper()}",
)
else:
hdr.extend(
[
Card(
keyword=f"OBSGEO-{dim.upper()}",
value=getattr(location, dim).to_value(u.m),
)
for dim in ("x", "y", "z")
]
)
elif np.any(location != col.location):
raise ValueError(
"Multiple Time Columns with different geocentric "
f"observatory locations ({location}, {col.location}) encountered."
"This is not supported by the FITS standard."
)
return newtable, hdr |
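A minimal usage sketch showing the round-trip-ready output:
from astropy.table import Table
from astropy.time import Time
t = Table()
t["time"] = Time(["2020-01-01", "2020-01-02"], scale="tt")
newtable, hdr = time_to_fits(t)
# newtable["time"] is an (N, 2) float column holding (jd1, jd2) in days, and hdr
# carries the global time keywords (e.g. TIMESYS) to merge into the output header.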
Determine the size of a FITS header block if a non-blank separator is used
between cards. | def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1)) |
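A worked example, using the standard FITS values BLOCK_SIZE = 2880 bytes and Card.length = 80 characters (36 cards per block):
print(_block_size(""))    # 2880 -- an empty separator adds nothing
print(_block_size("\n"))  # 2915 -- a one-byte separator between 36 cards adds 35 bytes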
Bytes needed to pad the input stringlen to the next FITS block. | def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE |
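For example, with the 2880-byte FITS block size:
print(_pad_length(100))   # 2780 -- bytes needed to reach the next 2880-byte boundary
print(_pad_length(5760))  # 0 -- already an exact multiple of the block size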