response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Test variable length lists, multidim columns, object columns. | def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns."""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format="ascii.ecsv")
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr["datatype"] == exp
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2) |
Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
columns as both per-value null and blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns. | def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
columns as both per-value null and blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, "data", "subtypes.ecsv"))
colnames = (
"i_index,"
"s_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,"
"f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,"
"v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,"
"m_int,m_double"
).split(",")
assert t.colnames == colnames
type_map = {
"byte": "int8",
"short": "int16",
"int": "int32",
"long": "int64",
"float": "float32",
"double": "float64",
"string": "str",
"boolean": "bool",
}
for col in t.itercols():
info = col.info
if info.name == "i_index":
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == "s": # Scalar
assert col.shape == (16,)
if subtype == "f": # Fixed array
assert col.shape == (16, 3)
if subtype == "v": # Variable array
assert col.shape == (16,)
assert info.dtype.name == "object"
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name]) |
Test blank field in subtypes. Similar to previous test but with explicit
checks of values | def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format="ascii.ecsv")
assert np.all(t["o"] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t["o"].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t["f"] == exp)
assert np.all(t["f"].mask == exp.mask)
assert np.all(t["v"][0] == [1])
assert np.all(t["v"][2] == [2, 3])
assert np.all(t["v"].mask == [False, True, False]) |
Test null values in fixed and variable array subtypes. | def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t["f"] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t["v"] = np.empty(2, dtype=object)
t["v"][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t["v"][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format="ascii.ecsv")
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name])
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2) |
Except for ECSV, guessing always requires at least 2 columns | def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t["col"].dtype.kind == "U" # would be int with basic format
assert t["col"].description == "hello" |
Nice, typical fixed format table | def test_read_normal():
"""Nice, typical fixed format table"""
table = """
# comment (with blank line above)
| Col1 | Col2 |
| 1.2 | "hello" |
| 2.4 |'s worlds|
"""
reader = ascii.get_reader(reader_cls=ascii.FixedWidth)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2"])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds") |
Nice, typical fixed format table with col names provided | def test_read_normal_names():
"""Nice, typical fixed format table with col names provided"""
table = """
# comment (with blank line above)
| Col1 | Col2 |
| 1.2 | "hello" |
| 2.4 |'s worlds|
"""
reader = ascii.get_reader(reader_cls=ascii.FixedWidth, names=("name1", "name2"))
dat = reader.read(table)
assert_equal(dat.colnames, ["name1", "name2"])
assert_almost_equal(dat[1][0], 2.4) |
Nice, typical fixed format table with col names provided | def test_read_normal_names_include():
"""Nice, typical fixed format table with col names provided"""
table = """
# comment (with blank line above)
| Col1 | Col2 | Col3 |
| 1.2 | "hello" | 3 |
| 2.4 |'s worlds| 7 |
"""
reader = ascii.get_reader(
reader_cls=ascii.FixedWidth,
names=("name1", "name2", "name3"),
include_names=("name1", "name3"),
)
dat = reader.read(table)
assert_equal(dat.colnames, ["name1", "name3"])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], 3) |
Nice, typical fixed format table with col name excluded | def test_read_normal_exclude():
"""Nice, typical fixed format table with col name excluded"""
table = """
# comment (with blank line above)
| Col1 | Col2 |
| 1.2 | "hello" |
| 2.4 |'s worlds|
"""
reader = ascii.get_reader(reader_cls=ascii.FixedWidth, exclude_names=("Col1",))
dat = reader.read(table)
assert_equal(dat.colnames, ["Col2"])
assert_equal(dat[1][0], "'s worlds") |
Weird input table with data values chopped by col extent | def test_read_weird():
"""Weird input table with data values chopped by col extent"""
table = """
Col1 | Col2 |
1.2 "hello"
2.4 sdf's worlds
"""
reader = ascii.get_reader(reader_cls=ascii.FixedWidth)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2"])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hel')
assert_equal(dat[1][1], "df's wo") |
Table with double delimiters | def test_read_double():
"""Table with double delimiters"""
table = """
|| Name || Phone || TCP||
| John | 555-1234 |192.168.1.10X|
| Mary | 555-2134 |192.168.1.12X|
| Bob | 555-4527 | 192.168.1.9X|
"""
dat = ascii.read(table, format="fixed_width", guess=False)
assert_equal(tuple(dat.dtype.names), ("Name", "Phone", "TCP"))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9") |
Table with space delimiter | def test_read_space_delimiter():
"""Table with space delimiter"""
table = """
Name --Phone- ----TCP-----
John 555-1234 192.168.1.10
Mary 555-2134 192.168.1.12
Bob 555-4527 192.168.1.9
"""
dat = ascii.read(table, format="fixed_width", guess=False, delimiter=" ")
assert_equal(tuple(dat.dtype.names), ("Name", "--Phone-", "----TCP-----"))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9") |
Table with no header row and auto-column naming | def test_read_no_header_autocolumn():
"""Table with no header row and auto-column naming"""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(
table, format="fixed_width", guess=False, header_start=None, data_start=0
)
assert_equal(tuple(dat.dtype.names), ("col1", "col2", "col3"))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9") |
Table with no header row and with col names provided. Second
and third rows also have hanging spaces after final |. | def test_read_no_header_names():
"""Table with no header row and with col names provided. Second
and third rows also have hanging spaces after final |."""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(
table,
format="fixed_width",
guess=False,
header_start=None,
data_start=0,
names=("Name", "Phone", "TCP"),
)
assert_equal(tuple(dat.dtype.names), ("Name", "Phone", "TCP"))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9") |
Table with no header row and auto-column naming | def test_read_no_header_autocolumn_NoHeader():
"""Table with no header row and auto-column naming"""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(table, format="fixed_width_no_header")
assert_equal(tuple(dat.dtype.names), ("col1", "col2", "col3"))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9") |
Table with no header row and with col names provided. Second
and third rows also have hanging spaces after final |. | def test_read_no_header_names_NoHeader():
"""Table with no header row and with col names provided. Second
and third rows also have hanging spaces after final |."""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(
table, format="fixed_width_no_header", names=("Name", "Phone", "TCP")
)
assert_equal(tuple(dat.dtype.names), ("Name", "Phone", "TCP"))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9") |
Table with no delimiter with column start and end values specified. | def test_read_col_starts():
"""Table with no delimiter with column start and end values specified."""
table = """
# 5 9 17 18 28
# | | || |
John 555- 1234 192.168.1.10
Mary 555- 2134 192.168.1.12
Bob 555- 4527 192.168.1.9
"""
dat = ascii.read(
table,
format="fixed_width_no_header",
names=("Name", "Phone", "TCP"),
col_starts=(0, 9, 18),
col_ends=(5, 17, 28),
)
assert_equal(tuple(dat.dtype.names), ("Name", "Phone", "TCP"))
assert_equal(dat[0][1], "555- 1234")
assert_equal(dat[1][0], "Mary")
assert_equal(dat[1][2], "192.168.1.")
assert_equal(dat[2][2], "192.168.1") |
Table with no delimiter with only column start or end values specified | def test_read_detect_col_starts_or_ends():
"""Table with no delimiter with only column start or end values specified"""
table = """
#1 9 19 <== Column start indexes
#| | | <== Column start positions
#<------><--------><-------------> <== Inferred column positions
John 555- 1234 192.168.1.10
Mary 555- 2134 192.168.1.123
Bob 555- 4527 192.168.1.9
Bill 555-9875 192.255.255.255
"""
for kwargs in ({"col_starts": (1, 9, 19)}, {"col_ends": (8, 18, 33)}):
dat = ascii.read(
table,
format="fixed_width_no_header",
names=("Name", "Phone", "TCP"),
**kwargs,
)
assert_equal(tuple(dat.dtype.names), ("Name", "Phone", "TCP"))
assert_equal(dat[0][1], "555- 1234")
assert_equal(dat[1][0], "Mary")
assert_equal(dat[1][2], "192.168.1.123")
assert_equal(dat[3][2], "192.255.255.255") |
Write a table as a normal fixed width table. | def test_write_normal():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, format="fixed_width")
assert_equal_splitlines(
out.getvalue(),
"""\
| Col1 | Col2 | Col3 | Col4 |
| 1.2 | "hello" | 1 | a |
| 2.4 | 's worlds | 2 | 2 |
""",
) |
Write a table as a normal fixed width table. | def test_write_fill_values():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, format="fixed_width", fill_values=("a", "N/A"))
assert_equal_splitlines(
out.getvalue(),
"""\
| Col1 | Col2 | Col3 | Col4 |
| 1.2 | "hello" | 1 | N/A |
| 2.4 | 's worlds | 2 | 2 |
""",
) |
Write a table as a fixed width table with no padding. | def test_write_no_pad():
"""Write a table as a fixed width table with no padding."""
out = StringIO()
ascii.write(dat, out, format="fixed_width", delimiter_pad=None)
assert_equal_splitlines(
out.getvalue(),
"""\
|Col1| Col2|Col3|Col4|
| 1.2| "hello"| 1| a|
| 2.4|'s worlds| 2| 2|
""",
) |
Write a table as a fixed width table with no bookend. | def test_write_no_bookend():
"""Write a table as a fixed width table with no bookend."""
out = StringIO()
ascii.write(dat, out, format="fixed_width", bookend=False)
assert_equal_splitlines(
out.getvalue(),
"""\
Col1 | Col2 | Col3 | Col4
1.2 | "hello" | 1 | a
2.4 | 's worlds | 2 | 2
""",
) |
Write a table as a fixed width table with no delimiter. | def test_write_no_delimiter():
"""Write a table as a fixed width table with no delimiter."""
out = StringIO()
ascii.write(dat, out, format="fixed_width", bookend=False, delimiter=None)
assert_equal_splitlines(
out.getvalue(),
"""\
Col1 Col2 Col3 Col4
1.2 "hello" 1 a
2.4 's worlds 2 2
""",
) |
Write a table as a normal fixed width table. | def test_write_noheader_normal():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, format="fixed_width_no_header")
assert_equal_splitlines(
out.getvalue(),
"""\
| 1.2 | "hello" | 1 | a |
| 2.4 | 's worlds | 2 | 2 |
""",
) |
Write a table as a fixed width table with no padding. | def test_write_noheader_no_pad():
"""Write a table as a fixed width table with no padding."""
out = StringIO()
ascii.write(dat, out, format="fixed_width_no_header", delimiter_pad=None)
assert_equal_splitlines(
out.getvalue(),
"""\
|1.2| "hello"|1|a|
|2.4|'s worlds|2|2|
""",
) |
Write a table as a fixed width table with no bookend. | def test_write_noheader_no_bookend():
"""Write a table as a fixed width table with no bookend."""
out = StringIO()
ascii.write(dat, out, format="fixed_width_no_header", bookend=False)
assert_equal_splitlines(
out.getvalue(),
"""\
1.2 | "hello" | 1 | a
2.4 | 's worlds | 2 | 2
""",
) |
Write a table as a fixed width table with no delimiter. | def test_write_noheader_no_delimiter():
"""Write a table as a fixed width table with no delimiter."""
out = StringIO()
ascii.write(dat, out, format="fixed_width_no_header", bookend=False, delimiter=None)
assert_equal_splitlines(
out.getvalue(),
"""\
1.2 "hello" 1 a
2.4 's worlds 2 2
""",
) |
Write a table as a fixed width table with no delimiter. | def test_write_formats():
"""Write a table as a fixed width table with no delimiter."""
out = StringIO()
ascii.write(
dat,
out,
format="fixed_width",
formats={"Col1": "%-8.3f", "Col2": "%-15s"},
)
assert_equal_splitlines(
out.getvalue(),
"""\
| Col1 | Col2 | Col3 | Col4 |
| 1.200 | "hello" | 1 | a |
| 2.400 | 's worlds | 2 | 2 |
""",
) |
Typical fixed format table with two header lines (with some cruft
thrown in to test column positioning | def test_read_twoline_normal():
"""Typical fixed format table with two header lines (with some cruft
thrown in to test column positioning"""
table = """
Col1 Col2
---- ---------
1.2xx"hello"
2.4 's worlds
"""
dat = ascii.read(table, format="fixed_width_two_line")
assert_equal(dat.dtype.names, ("Col1", "Col2"))
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds") |
Read restructured text table | def test_read_twoline_ReST():
"""Read restructured text table"""
table = """
======= ===========
Col1 Col2
======= ===========
1.2 "hello"
2.4 's worlds
======= ===========
"""
dat = ascii.read(
table,
format="fixed_width_two_line",
header_start=1,
position_line=2,
data_end=-1,
)
assert_equal(dat.dtype.names, ("Col1", "Col2"))
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds") |
Read text table designed for humans and test having position line
before the header line | def test_read_twoline_human():
"""Read text table designed for humans and test having position line
before the header line"""
table = """
+------+----------+
| Col1 | Col2 |
+------|----------+
| 1.2 | "hello" |
| 2.4 | 's worlds|
+------+----------+
"""
dat = ascii.read(
table,
format="fixed_width_two_line",
delimiter="+",
header_start=1,
position_line=0,
data_start=3,
data_end=-1,
)
assert_equal(dat.dtype.names, ("Col1", "Col2"))
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds") |
Test failure if too many different character are on position line.
The position line shall consist of only one character in addition to
the delimiter. | def test_read_twoline_fail():
"""Test failure if too many different character are on position line.
The position line shall consist of only one character in addition to
the delimiter.
"""
table = """
| Col1 | Col2 |
|------|==========|
| 1.2 | "hello" |
| 2.4 | 's worlds|
"""
with pytest.raises(InconsistentTableError) as excinfo:
ascii.read(table, format="fixed_width_two_line", delimiter="|", guess=False)
assert (
"Position line should only contain delimiters and one other character"
in str(excinfo.value)
) |
Test failure when position line uses characters prone to ambiguity
Characters in position line must be part an allowed set because
normal letters or numbers will lead to ambiguous tables. | def test_read_twoline_wrong_marker():
"""Test failure when position line uses characters prone to ambiguity
Characters in position line must be part an allowed set because
normal letters or numbers will lead to ambiguous tables.
"""
table = """
| Col1 | Col2 |
|aaaaaa|aaaaaaaaaa|
| 1.2 | "hello" |
| 2.4 | 's worlds|
"""
with pytest.raises(InconsistentTableError) as excinfo:
ascii.read(table, format="fixed_width_two_line", delimiter="|", guess=False)
assert "Characters in position line must be part" in str(excinfo.value) |
Write a table as a normal fixed width table. | def test_write_twoline_normal():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, format="fixed_width_two_line")
assert_equal_splitlines(
out.getvalue(),
"""\
Col1 Col2 Col3 Col4
---- --------- ---- ----
1.2 "hello" 1 a
2.4 's worlds 2 2
""",
) |
Write a table as a fixed width table with no padding. | def test_write_twoline_no_pad():
"""Write a table as a fixed width table with no padding."""
out = StringIO()
ascii.write(
dat,
out,
format="fixed_width_two_line",
delimiter_pad=" ",
position_char="=",
)
assert_equal_splitlines(
out.getvalue(),
"""\
Col1 Col2 Col3 Col4
==== ========= ==== ====
1.2 "hello" 1 a
2.4 's worlds 2 2
""",
) |
Write a table as a fixed width table with no bookend. | def test_write_twoline_no_bookend():
"""Write a table as a fixed width table with no bookend."""
out = StringIO()
ascii.write(dat, out, format="fixed_width_two_line", bookend=True, delimiter="|")
assert_equal_splitlines(
out.getvalue(),
"""\
|Col1| Col2|Col3|Col4|
|----|---------|----|----|
| 1.2| "hello"| 1| a|
| 2.4|'s worlds| 2| 2|
""",
) |
Test fix in #8511 where data_start is being ignored | def test_fixedwidthnoheader_splitting():
"""Test fix in #8511 where data_start is being ignored"""
tbl = """\
AAA y z
1 2 3
4 5 6
7 8 9
"""
names = ["a", "b", "c"]
dat = ascii.read(
tbl,
data_start=1,
data_end=3,
delimiter=" ",
names=names,
format="fixed_width_no_header",
)
assert dat.colnames == names
assert np.all(dat["a"] == [1, 4])
assert np.all(dat["b"] == [2, 5])
assert np.all(dat["c"] == [3, 6]) |
Test to make sure the class SoupString behaves properly. | def test_soupstring():
"""
Test to make sure the class SoupString behaves properly.
"""
soup = BeautifulSoup(
"<html><head></head><body><p>foo</p></body></html>", "html.parser"
)
soup_str = html.SoupString(soup)
assert isinstance(soup_str, str)
assert isinstance(soup_str, html.SoupString)
assert soup_str == "<html><head></head><body><p>foo</p></body></html>"
assert soup_str.soup is soup |
Test to make sure the class ListWriter behaves properly. | def test_listwriter():
"""
Test to make sure the class ListWriter behaves properly.
"""
lst = []
writer = html.ListWriter(lst)
for i in range(5):
writer.write(i)
for ch in "abcde":
writer.write(ch)
assert lst == [0, 1, 2, 3, 4, "a", "b", "c", "d", "e"] |
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process. | def test_identify_table():
"""
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process.
"""
# Should return False on non-<table> tags and None
soup = BeautifulSoup("<html><body></body></html>", "html.parser")
assert html.identify_table(soup, {}, 0) is False
assert html.identify_table(None, {}, 0) is False
soup = BeautifulSoup(
'<table id="foo"><tr><th>A</th></tr><tr><td>B</td></tr></table>',
"html.parser",
).table
assert html.identify_table(soup, {}, 2) is False
assert html.identify_table(soup, {}, 1) is True # Default index of 1
# Same tests, but with explicit parameter
assert html.identify_table(soup, {"table_id": 2}, 1) is False
assert html.identify_table(soup, {"table_id": 1}, 1) is True
# Test identification by string ID
assert html.identify_table(soup, {"table_id": "bar"}, 1) is False
assert html.identify_table(soup, {"table_id": "foo"}, 1) is True |
Test reading a table with missing data | def test_missing_data():
"""
Test reading a table with missing data
"""
# First with default where blank => '0'
table_in = [
"<table>",
"<tr><th>A</th></tr>",
"<tr><td></td></tr>",
"<tr><td>1</td></tr>",
"</table>",
]
dat = Table.read(table_in, format="ascii.html")
assert dat.masked is False
assert np.all(dat["A"].mask == [True, False])
assert dat["A"].dtype.kind == "i"
# Now with a specific value '...' => missing
table_in = [
"<table>",
"<tr><th>A</th></tr>",
"<tr><td>...</td></tr>",
"<tr><td>1</td></tr>",
"</table>",
]
dat = Table.read(table_in, format="ascii.html", fill_values=[("...", "0")])
assert dat.masked is False
assert np.all(dat["A"].mask == [True, False])
assert dat["A"].dtype.kind == "i" |
Test reading a table and renaming cols | def test_rename_cols():
"""
Test reading a table and renaming cols
"""
table_in = [
"<table>",
"<tr><th>A</th> <th>B</th></tr>",
"<tr><td>1</td><td>2</td></tr>",
"</table>",
]
# Swap column names
dat = Table.read(table_in, format="ascii.html", names=["B", "A"])
assert dat.colnames == ["B", "A"]
assert len(dat) == 1
# Swap column names and only include A (the renamed version)
dat = Table.read(
table_in, format="ascii.html", names=["B", "A"], include_names=["A"]
)
assert dat.colnames == ["A"]
assert len(dat) == 1
assert np.all(dat["A"] == 2) |
Test reading a table with no column header | def test_no_names():
"""
Test reading a table with no column header
"""
table_in = ["<table>", "<tr><td>1</td></tr>", "<tr><td>2</td></tr>", "</table>"]
dat = Table.read(table_in, format="ascii.html")
assert dat.colnames == ["col1"]
assert len(dat) == 2
dat = Table.read(table_in, format="ascii.html", names=["a"])
assert dat.colnames == ["a"]
assert len(dat) == 2 |
Raise an exception with an informative error message if table_id
is not found. | def test_identify_table_fail():
"""
Raise an exception with an informative error message if table_id
is not found.
"""
table_in = ['<table id="foo"><tr><th>A</th></tr>', "<tr><td>B</td></tr></table>"]
with pytest.raises(core.InconsistentTableError) as err:
Table.read(
table_in, format="ascii.html", htmldict={"table_id": "bad_id"}, guess=False
)
assert err.match("ERROR: HTML table id 'bad_id' not found$")
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format="ascii.html", htmldict={"table_id": 3}, guess=False)
assert err.match("ERROR: HTML table number 3 not found$") |
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid. | def test_backend_parsers():
"""
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid.
"""
for parser in ("lxml", "xml", "html.parser", "html5lib"):
try:
Table.read(
"data/html2.html",
format="ascii.html",
htmldict={"parser": parser},
guess=False,
)
except FeatureNotFound:
if parser == "html.parser":
raise
# otherwise ignore if the dependency isn't present
# reading should fail if the parser is invalid
with pytest.raises(FeatureNotFound):
Table.read(
"data/html2.html",
format="ascii.html",
htmldict={"parser": "foo"},
guess=False,
) |
This should return an OptionalTableImportError if BeautifulSoup
is not installed. | def test_htmlinputter_no_bs4():
"""
This should return an OptionalTableImportError if BeautifulSoup
is not installed.
"""
inputter = html.HTMLInputter()
with pytest.raises(core.OptionalTableImportError):
inputter.process_lines([]) |
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements. | def test_htmlinputter():
"""
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements.
"""
f = "data/html.html"
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
# In absence of table_id, defaults to the first table
expected = [
"<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>",
"<tr><td>1</td><td>a</td><td>1.05</td></tr>",
"<tr><td>2</td><td>b</td><td>2.75</td></tr>",
"<tr><td>3</td><td>c</td><td>-1.25</td></tr>",
]
assert [str(x) for x in inputter.get_lines(table)] == expected
# Should raise an InconsistentTableError if the table is not found
inputter.html = {"table_id": 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
# Identification by string ID
inputter.html["table_id"] = "second"
expected = [
"<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>",
"<tr><td>4</td><td>d</td><td>10.5</td></tr>",
"<tr><td>5</td><td>e</td><td>27.5</td></tr>",
"<tr><td>6</td><td>f</td><td>-12.5</td></tr>",
]
assert [str(x) for x in inputter.get_lines(table)] == expected
# Identification by integer index
inputter.html["table_id"] = 3
expected = [
"<tr><th>C1</th><th>C2</th><th>C3</th></tr>",
"<tr><td>7</td><td>g</td><td>105.0</td></tr>",
"<tr><td>8</td><td>h</td><td>275.0</td></tr>",
"<tr><td>9</td><td>i</td><td>-125.0</td></tr>",
]
assert [str(x) for x in inputter.get_lines(table)] == expected |
Test to make sure that HTMLSplitter correctly inputs lines
of type SoupString to return a generator that gives all
header and data elements. | def test_htmlsplitter():
"""
Test to make sure that HTMLSplitter correctly inputs lines
of type SoupString to return a generator that gives all
header and data elements.
"""
splitter = html.HTMLSplitter()
lines = [
html.SoupString(
BeautifulSoup(
"<table><tr><th>Col 1</th><th>Col 2</th></tr></table>", "html.parser"
).tr
),
html.SoupString(
BeautifulSoup(
"<table><tr><td>Data 1</td><td>Data 2</td></tr></table>", "html.parser"
).tr
),
]
expected_data = [["Col 1", "Col 2"], ["Data 1", "Data 2"]]
assert list(splitter(lines)) == expected_data
# Make sure the presence of a non-SoupString triggers a TypeError
lines.append("<tr><td>Data 3</td><td>Data 4</td></tr>")
with pytest.raises(TypeError):
list(splitter(lines))
# Make sure that passing an empty list triggers an error
with pytest.raises(core.InconsistentTableError):
list(splitter([])) |
Test to ensure that the start_line method of HTMLHeader
returns the first line of header data. Uses t/html.html
for sample input. | def test_htmlheader_start():
"""
Test to ensure that the start_line method of HTMLHeader
returns the first line of header data. Uses t/html.html
for sample input.
"""
f = "data/html.html"
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
header = html.HTMLHeader()
lines = inputter.get_lines(table)
assert (
str(lines[header.start_line(lines)])
== "<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>"
)
inputter.html["table_id"] = "second"
lines = inputter.get_lines(table)
assert (
str(lines[header.start_line(lines)])
== "<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>"
)
inputter.html["table_id"] = 3
lines = inputter.get_lines(table)
assert (
str(lines[header.start_line(lines)])
== "<tr><th>C1</th><th>C2</th><th>C3</th></tr>"
)
# start_line should return None if no valid header is found
lines = [
html.SoupString(
BeautifulSoup("<table><tr><td>Data</td></tr></table>", "html.parser").tr
),
html.SoupString(BeautifulSoup("<p>Text</p>", "html.parser").p),
]
assert header.start_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append("<tr><th>Header</th></tr>")
with pytest.raises(TypeError):
header.start_line(lines) |
Test to ensure that the start_line and end_lines methods
of HTMLData returns the first line of table data. Uses
t/html.html for sample input. | def test_htmldata():
"""
Test to ensure that the start_line and end_lines methods
of HTMLData returns the first line of table data. Uses
t/html.html for sample input.
"""
f = "data/html.html"
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
data = html.HTMLData()
lines = inputter.get_lines(table)
assert (
str(lines[data.start_line(lines)])
== "<tr><td>1</td><td>a</td><td>1.05</td></tr>"
)
# end_line returns the index of the last data element + 1
assert (
str(lines[data.end_line(lines) - 1])
== "<tr><td>3</td><td>c</td><td>-1.25</td></tr>"
)
inputter.html["table_id"] = "second"
lines = inputter.get_lines(table)
assert (
str(lines[data.start_line(lines)])
== "<tr><td>4</td><td>d</td><td>10.5</td></tr>"
)
assert (
str(lines[data.end_line(lines) - 1])
== "<tr><td>6</td><td>f</td><td>-12.5</td></tr>"
)
inputter.html["table_id"] = 3
lines = inputter.get_lines(table)
assert (
str(lines[data.start_line(lines)])
== "<tr><td>7</td><td>g</td><td>105.0</td></tr>"
)
assert (
str(lines[data.end_line(lines) - 1])
== "<tr><td>9</td><td>i</td><td>-125.0</td></tr>"
)
# start_line should raise an error if no table data exists
lines = [
html.SoupString(BeautifulSoup("<div></div>", "html.parser").div),
html.SoupString(BeautifulSoup("<p>Text</p>", "html.parser").p),
]
with pytest.raises(core.InconsistentTableError):
data.start_line(lines)
# end_line should return None if no table data exists
assert data.end_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append("<tr><td>Data</td></tr>")
with pytest.raises(TypeError):
data.start_line(lines)
with pytest.raises(TypeError):
data.end_line(lines) |
def test_multicolumn_write():
    """
    Test to make sure that the HTML writer writes multidimensional
    columns (those with iterable elements) using the colspan
    attribute of <th>.
    """
    # One scalar column plus a 2-wide float column and a 3-wide string column.
    col1 = [1, 2, 3]
    col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
    col3 = [("a", "a", "a"), ("b", "b", "b"), ("c", "c", "c")]
    table = Table([col1, col2, col3], names=("C1", "C2", "C3"))
    # Multidim columns get colspan="2"/"3" headers and one <td> per element.
    # NOTE(review): whitespace/escaping of this literal follows the source as
    # shown; confirm against the writer's actual output formatting.
    expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td>a</td>
<td>a</td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td>b</td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
    out = html.HTML().write(table)[0].strip()
    assert out == expected.strip()
def test_multicolumn_write_escape():
    """
    Test to make sure that the HTML writer writes multidimensional
    columns (those with iterable elements) using the colspan
    attribute of <th>.
    """
    col1 = [1, 2, 3]
    col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
    # Column C3 contains markup; with raw_html_cols="C3" it is emitted as-is.
    col3 = [("<a></a>", "<a></a>", "a"), ("<b></b>", "b", "b"), ("c", "c", "c")]
    table = Table([col1, col2, col3], names=("C1", "C2", "C3"))
    # NOTE(review): literal reproduced as shown in source; verify entity
    # escaping against the writer's actual output.
    expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td><a></a></td>
<td><a></a></td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td><b></b></td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
    out = html.HTML(htmldict={"raw_html_cols": "C3"}).write(table)[0].strip()
    assert out == expected.strip()
def test_write_no_multicols():
    """
    Test to make sure that the HTML writer will not use
    multi-dimensional columns if the multicol parameter
    is False.
    """
    col1 = [1, 2, 3]
    col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
    col3 = [("a", "a", "a"), ("b", "b", "b"), ("c", "c", "c")]
    table = Table([col1, col2, col3], names=("C1", "C2", "C3"))
    # With multicol=False each multidim cell is rendered as a single
    # "first .. last" summary string instead of one <td> per element.
    expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
<th>C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0 .. 1.0</td>
<td>a .. a</td>
</tr>
<tr>
<td>2</td>
<td>2.0 .. 2.0</td>
<td>b .. b</td>
</tr>
<tr>
<td>3</td>
<td>3.0 .. 3.0</td>
<td>c .. c</td>
</tr>
</table>
</body>
</html>
"""
    assert html.HTML({"multicol": False}).write(table)[0].strip() == expected.strip()
def test_multicolumn_read():
    """
    Test that the HTML reader inputs multidimensional columns (those
    with iterable elements) declared via the colspan attribute of <th>,
    and that a string element within a multidimensional column casts
    all elements to string prior to type conversion operations.
    """
    tab = Table.read("data/html2.html", format="ascii.html")
    # Column A must come back as a 2-wide string column (width 21 so the
    # long numeric-looking string survives), column B as float.
    expected = Table(
        np.array(
            [(["1", "2.5000000000000000001"], 3), (["1a", "1"], 3.5)],
            dtype=[("A", np.dtype((str, 21)), (2,)), ("B", "<f8")],
        )
    )
    assert np.all(tab == expected)
def test_raw_html_write():
    """
    Test that columns can contain raw HTML which is not escaped.
    """
    t = Table([["<em>x</em>"], ["<em>y</em>"]], names=["a", "b"])

    # Column "a" contains raw HTML; accept both the string and the list
    # forms of the raw_html_cols option.
    expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
    for raw_cols in ("a", ["a"]):
        buf = StringIO()
        t.write(buf, format="ascii.html", htmldict={"raw_html_cols": raw_cols})
        assert expected in buf.getvalue()

    # Both columns contain raw HTML (list input).
    buf = StringIO()
    t.write(buf, format="ascii.html", htmldict={"raw_html_cols": ["a", "b"]})
    expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
    assert expected in buf.getvalue()
def test_raw_html_write_clean():
    """
    Test that columns can contain raw HTML which is not escaped.
    """
    import bleach

    t = Table(
        [["<script>x</script>"], ["<p>y</p>"], ["<em>y</em>"]], names=["a", "b", "c"]
    )

    # Default cleaner: <script> and <p> are sanitized while <em> passes.
    buf = StringIO()
    t.write(buf, format="ascii.html", htmldict={"raw_html_cols": t.colnames})
    expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
    assert expected in buf.getvalue()

    # Whitelisting <p> via raw_html_clean_kwargs keeps it intact.
    buf = StringIO()
    t.write(
        buf,
        format="ascii.html",
        htmldict={
            "raw_html_cols": t.colnames,
            "raw_html_clean_kwargs": {"tags": list(bleach.ALLOWED_TAGS) + ["p"]},
        },
    )
    expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
    assert expected in buf.getvalue()
def test_write_table_html_fill_values():
    """
    Test that passing fill_values should replace any matching row
    """
    actual = StringIO()
    tab = Table([[1], [2]], names=("a", "b"))
    ascii.write(tab, actual, fill_values=("1", "Hello world"), format="html")

    # The replaced table written directly must match byte-for-byte.
    expected = StringIO()
    tab_expected = Table([["Hello world"], [2]], names=("a", "b"))
    ascii.write(tab_expected, expected, format="html")

    assert actual.getvalue() == expected.getvalue()
def test_write_table_html_fill_values_optional_columns():
    """
    Test that passing optional column in fill_values should only replace
    matching columns
    """
    actual = StringIO()
    tab = Table([[1], [1]], names=("a", "b"))
    # The trailing "b" restricts the replacement to column b only.
    ascii.write(tab, actual, fill_values=("1", "Hello world", "b"), format="html")

    expected = StringIO()
    tab_expected = Table([[1], ["Hello world"]], names=("a", "b"))
    ascii.write(tab_expected, expected, format="html")

    assert actual.getvalue() == expected.getvalue()
def test_write_table_html_fill_values_masked():
    """
    Test that passing masked values in fill_values should only replace
    masked columns or values
    """
    actual = StringIO()
    tab = Table([[1], [1]], names=("a", "b"), masked=True, dtype=("i4", "i8"))
    tab["a"] = np.ma.masked  # only column a is masked
    ascii.write(tab, actual, fill_values=(ascii.masked, "TEST"), format="html")

    expected = StringIO()
    tab_expected = Table([["TEST"], [1]], names=("a", "b"))
    ascii.write(tab_expected, expected, format="html")

    assert actual.getvalue() == expected.getvalue()
def test_multicolumn_table_html_fill_values():
    """
    Test to make sure that the HTML writer writes multidimensional
    columns with correctly replaced fill_values.
    """
    # Input table: every "a" element in the multidim string column
    # should be replaced by "z".
    actual = StringIO()
    tab = Table(
        [
            [1, 2, 3],
            [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
            [("a", "a", "a"), ("b", "b", "b"), ("c", "c", "c")],
        ],
        names=("C1", "C2", "C3"),
    )
    ascii.write(tab, actual, fill_values=("a", "z"), format="html")

    expected = StringIO()
    tab_expected = Table(
        [
            [1, 2, 3],
            [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
            [("z", "z", "z"), ("b", "b", "b"), ("c", "c", "c")],
        ],
        names=("C1", "C2", "C3"),
    )
    ascii.write(tab_expected, expected, format="html")

    assert actual.getvalue() == expected.getvalue()
def test_multi_column_write_table_html_fill_values_masked():
    """
    Test that passing masked values in fill_values should only replace
    masked columns or values for multidimensional tables.
    """
    buffer_output = StringIO()
    t = Table([[1, 2, 3, 4], ["--", "a", "--", "b"]], names=("a", "b"), masked=True)
    # Mask the first two entries of both columns; the literal "--" strings
    # in unmasked rows must NOT be replaced.
    t["a"][0:2] = np.ma.masked
    t["b"][0:2] = np.ma.masked
    ascii.write(t, buffer_output, fill_values=[(ascii.masked, "MASKED")], format="html")

    t_expected = Table(
        [["MASKED", "MASKED", 3, 4], ["MASKED", "MASKED", "--", "b"]], names=("a", "b")
    )
    buffer_expected = StringIO()
    ascii.write(t_expected, buffer_expected, format="html")

    # Leftover debug print removed; the assertion message on failure is
    # sufficient for diagnosis.
    assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_formatted_columns():
    """
    Test to make sure that the HTML writer writes out using the
    supplied formatting.
    """
    col1 = [1, 2]
    col2 = [1.234567e-11, -9.876543e11]
    # Zero-padded int for C1, 2-digit scientific notation for C2.
    formats = {"C1": "04d", "C2": ".2e"}
    table = Table([col1, col2], names=formats.keys())
    expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
</tr>
</thead>
<tr>
<td>0001</td>
<td>1.23e-11</td>
</tr>
<tr>
<td>0002</td>
<td>-9.88e+11</td>
</tr>
</table>
</body>
</html>
"""
    with StringIO() as sp:
        table.write(sp, format="html", formats=formats)
        out = sp.getvalue().strip()
    assert out == expected.strip()
def test_read_html_unicode():
    """
    Test reading an HTML table with unicode values
    """
    lines = [
        "<table>",
        "<tr><td>Δ</td></tr>",
        "<tr><td>Δ</td></tr>",
        "</table>",
    ]
    dat = Table.read(lines, format="ascii.html")
    # Both rows decode to the same unicode character.
    assert np.all(dat["col1"] == ["Δ", "Δ"])
def test_out_with_nonstring_null():
    """Test a (non-string) fill value.
    Even for an unmasked tables, the fill_value should show up in the
    table header.
    """
    table = Table([[3]], masked=True)
    out = StringIO()
    # Use an integer (non-string) null value for the IPAC writer.
    ascii.write(table, out, format="ipac", fill_values=[(masked, -99999)])
    # NOTE(review): literal reproduced as shown in source; IPAC output is
    # whitespace-sensitive, verify column alignment against actual output.
    expected_out = """\
| col0|
| long|
| |
|-99999|
3
"""
    assert out.getvalue().strip().splitlines() == expected_out.splitlines()
def lowercase_header(value):
    """Make every non-comment line lower case.

    Lines starting with "!" are treated as comments and left untouched.
    """
    out = [
        raw if raw.startswith("!") else raw.lower() for raw in value.splitlines()
    ]
    return "\n".join(out)
def home_is_data(monkeypatch, request):
    """
    Pytest fixture to run a test case with tilde-prefixed paths.
    In the tilde-path case, environment variables are temporarily
    modified so that '~' resolves to the data directory.
    """
    data_dir = get_pkg_data_path("data")
    # '~' expands via HOME on Unix and USERPROFILE on Windows.
    monkeypatch.setenv("HOME", data_dir)
    monkeypatch.setenv("USERPROFILE", data_dir)
def test_convert_overflow(fast_reader):
    """
    Test reading an extremely large integer, which falls through to
    string due to an overflow error (#2234). The C parsers used to
    return inf (kind 'f') for this.
    """
    with pytest.warns(
        AstropyWarning, match="OverflowError converting to IntType in column a"
    ):
        dat = ascii.read(
            ["a", "1" * 10000], format="basic", fast_reader=fast_reader, guess=False
        )
    # The column falls back to a string (unicode) dtype.
    assert dat["a"].dtype.kind == "U"
def test_read_specify_converters_with_names():
    """
    Exact example from #9701: When using ascii.read with both the names and
    converters arguments, the converters dictionary ignores the user-supplied
    names and requires that you know the guessed names.
    """
    csv_text = ["a,b,c", "1,2,3", "4,5,6"]
    converters = {
        "A": [ascii.convert_numpy(float)],
        "B": [ascii.convert_numpy(int)],
        "C": [ascii.convert_numpy(str)],
    }
    t = ascii.read(
        csv_text, format="csv", names=["A", "B", "C"], converters=converters
    )
    # Converters are keyed on the user-supplied names.
    for name, kind in (("A", "f"), ("B", "i"), ("C", "U")):
        assert t[name].dtype.kind == kind
def test_guess_with_names_arg():
    """
    Make sure reading a table with guess=True gives the expected result when
    the names arg is specified.
    """
    # NoHeader format: `names` replaces the default col0/col1 names. The
    # Basic-format guess is rejected because its column names would be
    # '1', '2'.
    dat = ascii.read(["1,2", "3,4"], names=("a", "b"))
    assert len(dat) == 2
    assert dat.colnames == ["a", "b"]

    # Basic format (comma- then space-delimited): the first row supplies
    # column names which are then replaced by 'a', 'b'.
    for lines in (["c,d", "3,4"], ["c d", "e f"]):
        dat = ascii.read(lines, names=("a", "b"))
        assert len(dat) == 1
        assert dat.colnames == ["a", "b"]
def test_guess_with_format_arg():
    """
    When the format is explicitly given then disable the strict column name
    checking in guessing.
    """
    # Check both the io.ascii and the unified Table.read interfaces.
    for reader, fmt in ((ascii.read, "basic"), (Table.read, "ascii.basic")):
        dat = reader(["1,2", "3,4"], format=fmt)
        assert len(dat) == 1
        assert dat.colnames == ["1", "2"]

        dat = reader(["1,2", "3,4"], format=fmt, names=("a", "b"))
        assert len(dat) == 1
        assert dat.colnames == ["a", "b"]
def test_guess_with_delimiter_arg():
    """
    When the delimiter is explicitly given then do not try others in guessing.
    """
    fields = ["10.1E+19", "3.14", "2048", "-23"]
    values = [1.01e20, 3.14, 2048, -23]

    # Default guess recognises CSV with optional spaces.
    t0 = ascii.read(asciiIO(", ".join(fields)), guess=True)
    for name, val in zip(t0.colnames, values):
        assert t0[name][0] == val

    # Forcing space as delimiter yields str columns like '10.1E+19,'.
    t1 = ascii.read(asciiIO(", ".join(fields)), guess=True, delimiter=" ")
    for name, field in zip(t1.colnames[:-1], fields[:-1]):
        assert t1[name][0] == field + ","
def test_read_with_names_arg(fast_reader):
    """
    Test that a bad value of `names` raises an exception.
    """
    # The C parser only uses the columns in `names` and thus reports the
    # mismatch as an inconsistent number of data columns.
    bad_lines = ["c d", "e f"]
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read(bad_lines, names=("a",), guess=False, fast_reader=fast_reader)
def test_daophot_indef():
    """Test that INDEF is correctly interpreted as a missing value"""
    table = ascii.read("data/daophot2.dat", format="daophot")
    all_indef = ("OTIME", "MAG", "MERR", "XAIRMASS")
    for col in table.itercols():
        if col.name in all_indef:
            # Columns of all-INDEF values come back fully masked.
            assert np.all(col.mask)
        else:
            # Everything else is a regular (unmasked) Column.
            assert not hasattr(col, "mask")
def test_daophot_types():
    """
    Test specific data types which are different from what would be
    inferred automatically based only data values. DAOphot reader uses
    the header information to assign types.
    """
    tab = ascii.read("data/daophot2.dat", format="daophot")
    # Float/double columns, including MAG which has no data values at all.
    for name in ("LID", "MAG"):
        assert tab[name].dtype.char in "fd"
    # String column even though the data values look like ints.
    assert tab["PIER"].dtype.char in "US"
    assert tab["ID"].dtype.kind == "i"
def check_fill_values(data):
    """Compare the read table column by column with expectation.

    ``data`` must have a plain (unmasked) column "a" equal to ["1", "a"]
    and a column "b" whose second entry is masked.
    """
    # Column "a" is fully valid and therefore an unmasked column.
    assert not hasattr(data["a"], "mask")
    assert (data["a"] == ["1", "a"]).all()
    # Column "b" has exactly its second entry masked.
    assert (data["b"].mask == [False, True]).all()
    # A masked value is "do not care" in comparison.
    assert (data["b"] == [2, -999]).all()
    data["b"].mask = False  # explicitly unmask for comparison
    assert (data["b"] == [2, 1]).all()
def test_set_guess_kwarg():
    """Read a file using guess with one of the typical guess_kwargs explicitly set."""
    dat = ascii.read("data/space_delim_no_header.dat", delimiter=",", guess=True)
    # With a comma delimiter the space-delimited line collapses into a
    # single column.
    assert dat.dtype.names == ("1 3.4 hello",)
    assert len(dat) == 1
def test_read_rdb_wrong_type(fast_reader):
    """Read RDB data with inconsistent data type (except failure)"""
    # Header declares numeric ('N') columns but the data contains a string.
    rdb_text = "col1\tcol2\nN\tN\n1\tHello"
    with pytest.raises(ValueError):
        ascii.read(rdb_text, format="rdb", fast_reader=fast_reader)
def test_default_missing(fast_reader):
    """
    Read a table with empty values and ensure that corresponding entries are masked
    """
    # Blank CSV fields should become masked entries ("--" in pformat).
    table = "\n".join(  # noqa: FLY002
        [
            "a,b,c,d",
            "1,3,,",
            "2, , 4.0 , ss ",
        ]
    )
    dat = ascii.read(table, fast_reader=fast_reader)
    # Masked entries do not force a masked Table here.
    assert dat.masked is False
    assert dat.pformat() == [
        " a b c d ",
        "--- --- --- ---",
        " 1 3 -- --",
        " 2 -- 4.0 ss",
    ]
    # Single row table with a single missing element
    table = """ a \n "" """
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.pformat() == [" a ", "---", " --"]
    # Column type is still inferred as integer despite the missing value.
    assert dat["a"].dtype.kind == "i"
    # Same test with a fixed width reader
    table = "\n".join(  # noqa: FLY002
        [
            " a b c d ",
            "--- --- --- ---",
            " 1 3 ",
            " 2 4.0 ss",
        ]
    )
    dat = ascii.read(table, format="fixed_width_two_line")
    assert dat.masked is False
    assert dat.pformat() == [
        " a b c d ",
        "--- --- --- ---",
        " 1 3 -- --",
        " 2 -- 4.0 ss",
    ]
    # fill_values=None disables masking: blanks remain blank.
    dat = ascii.read(table, format="fixed_width_two_line", fill_values=None)
    assert dat.masked is False
    assert dat.pformat() == [
        " a b c d ",
        "--- --- --- ---",
        " 1 3 ",
        " 2 4.0 ss",
    ]
    # An empty fill_values list likewise leaves values untouched.
    dat = ascii.read(table, format="fixed_width_two_line", fill_values=[])
    assert dat.masked is False
    assert dat.pformat() == [
        " a b c d ",
        "--- --- --- ---",
        " 1 3 ",
        " 2 4.0 ss",
    ]
Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows. | def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/apostrophe.rdb",
"nrows": 2,
"opts": {"format": "rdb"},
},
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/apostrophe.tab",
"nrows": 2,
"opts": {"format": "tab"},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/cds.dat",
"nrows": 1,
"opts": {"format": "cds"},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/cds.dat",
"nrows": 1,
"opts": {"format": "mrt"},
},
# Test malformed CDS file (issues #2241 #467)
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/cds_malformed.dat",
"nrows": 1,
"opts": {"format": "cds", "data_start": "guess"},
},
{
"cols": ("a", "b", "c"),
"name": "data/commented_header.dat",
"nrows": 2,
"opts": {"format": "commented_header"},
},
{
"cols": ("a", "b", "c"),
"name": "data/commented_header2.dat",
"nrows": 2,
"opts": {"format": "commented_header", "header_start": -1},
},
{
"cols": ("col1", "col2", "col3", "col4", "col5"),
"name": "data/continuation.dat",
"nrows": 2,
"opts": {
"inputter_cls": ascii.ContinuationLinesInputter,
"format": "no_header",
},
},
{
"cols": (
"ID",
"XCENTER",
"YCENTER",
"MAG",
"MERR",
"MSKY",
"NITER",
"SHARPNESS",
"CHI",
"PIER",
"PERROR",
),
"name": "data/daophot.dat",
"nrows": 2,
"opts": {"format": "daophot"},
},
{
"cols": (
"NUMBER",
"FLUX_ISO",
"FLUXERR_ISO",
"VALU-ES",
"VALU-ES_1",
"FLAG",
),
"name": "data/sextractor.dat",
"nrows": 3,
"opts": {"format": "sextractor"},
},
{
"cols": ("ra", "dec", "sai", "v2", "sptype"),
"name": "data/ipac.dat",
"nrows": 2,
"opts": {"format": "ipac"},
},
{
"cols": (
"col0",
"objID",
"osrcid",
"xsrcid",
"SpecObjID",
"ra",
"dec",
"obsid",
"ccdid",
"z",
"modelMag_i",
"modelMagErr_i",
"modelMag_r",
"modelMagErr_r",
"expo",
"theta",
"rad_ecf_39",
"detlim90",
"fBlim90",
),
"name": "data/nls1_stackinfo.dbout",
"nrows": 58,
"opts": {"data_start": 2, "delimiter": "|", "guess": False},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/no_data_cds.dat",
"nrows": 0,
"opts": {"format": "cds"},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/no_data_cds.dat",
"nrows": 0,
"opts": {"format": "mrt"},
},
{
"cols": (
"ID",
"XCENTER",
"YCENTER",
"MAG",
"MERR",
"MSKY",
"NITER",
"SHARPNESS",
"CHI",
"PIER",
"PERROR",
),
"name": "data/no_data_daophot.dat",
"nrows": 0,
"opts": {"format": "daophot"},
},
{
"cols": ("NUMBER", "FLUX_ISO", "FLUXERR_ISO", "VALUES", "VALUES_1", "FLAG"),
"name": "data/no_data_sextractor.dat",
"nrows": 0,
"opts": {"format": "sextractor"},
},
{
"cols": ("ra", "dec", "sai", "v2", "sptype"),
"name": "data/no_data_ipac.dat",
"nrows": 0,
"opts": {"format": "ipac"},
},
{
"cols": ("ra", "v2"),
"name": "data/ipac.dat",
"nrows": 2,
"opts": {"format": "ipac", "include_names": ["ra", "v2"]},
},
{
"cols": ("a", "b", "c"),
"name": "data/no_data_with_header.dat",
"nrows": 0,
"opts": {},
},
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/short.rdb",
"nrows": 7,
"opts": {"format": "rdb"},
},
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/short.tab",
"nrows": 7,
"opts": {"format": "tab"},
},
{
"cols": ("test 1a", "test2", "test3", "test4"),
"name": "data/simple.txt",
"nrows": 2,
"opts": {"quotechar": "'"},
},
{
"cols": ("top1", "top2", "top3", "top4"),
"name": "data/simple.txt",
"nrows": 1,
"opts": {"quotechar": "'", "header_start": 1, "data_start": 2},
},
{
"cols": ("top1", "top2", "top3", "top4"),
"name": "data/simple.txt",
"nrows": 1,
"opts": {"quotechar": "'", "header_start": 1},
},
{
"cols": ("top1", "top2", "top3", "top4"),
"name": "data/simple.txt",
"nrows": 2,
"opts": {"quotechar": "'", "header_start": 1, "data_start": 1},
},
{
"cols": ("obsid", "redshift", "X", "Y", "object", "rad"),
"name": "data/simple2.txt",
"nrows": 3,
"opts": {"delimiter": "|"},
},
{
"cols": ("obsid", "redshift", "X", "Y", "object", "rad"),
"name": "data/simple3.txt",
"nrows": 2,
"opts": {"delimiter": "|"},
},
{
"cols": ("col1", "col2", "col3", "col4", "col5", "col6"),
"name": "data/simple4.txt",
"nrows": 3,
"opts": {"format": "no_header", "delimiter": "|"},
},
{
"cols": ("col1", "col2", "col3"),
"name": "data/space_delim_no_header.dat",
"nrows": 2,
"opts": {"format": "no_header"},
},
{
"cols": ("col1", "col2", "col3"),
"name": "data/space_delim_no_header.dat",
"nrows": 2,
"opts": {"format": "no_header", "header_start": None},
},
{
"cols": ("obsid", "offset", "x", "y", "name", "oaa"),
"name": "data/space_delim_blank_lines.txt",
"nrows": 3,
"opts": {},
},
{
"cols": ("zabs1.nh", "p1.gamma", "p1.ampl", "statname", "statval"),
"name": "data/test4.dat",
"nrows": 9,
"opts": {},
},
{
"cols": ("a", "b", "c"),
"name": "data/fill_values.txt",
"nrows": 2,
"opts": {"delimiter": ","},
},
{
"name": "data/whitespace.dat",
"cols": ("quoted colname with tab\tinside", "col2", "col3"),
"nrows": 2,
"opts": {"delimiter": r"\s"},
},
{
"name": "data/simple_csv.csv",
"cols": ("a", "b", "c"),
"nrows": 2,
"opts": {"format": "csv"},
},
{
"name": "data/simple_csv_missing.csv",
"cols": ("a", "b", "c"),
"nrows": 2,
"skip": True,
"opts": {"format": "csv"},
},
{
"cols": ("cola", "colb", "colc"),
"name": "data/latex1.tex",
"nrows": 2,
"opts": {"format": "latex"},
},
{
"cols": ("Facility", "Id", "exposure", "date"),
"name": "data/latex2.tex",
"nrows": 3,
"opts": {"format": "aastex"},
},
{
"cols": ("cola", "colb", "colc"),
"name": "data/latex3.tex",
"nrows": 2,
"opts": {"format": "latex"},
},
{
"cols": ("Col1", "Col2", "Col3", "Col4"),
"name": "data/fixed_width_2_line.txt",
"nrows": 2,
"opts": {"format": "fixed_width_two_line"},
},
]
try:
import bs4 # noqa: F401
testfiles.append(
{
"cols": ("Column 1", "Column 2", "Column 3"),
"name": "data/html.html",
"nrows": 3,
"opts": {"format": "html"},
}
)
except ImportError:
pass
if name is not None:
# If there are multiple matches then return a list, else return just
# the one match.
out = [x for x in testfiles if x["name"] == name]
if len(out) == 1:
out = out[0]
else:
out = testfiles
return out |
def test_header_start_exception():
    """Check certain Readers throw an exception if ``header_start`` is set
    For certain Readers it does not make sense to set the ``header_start``, they
    throw an exception if you try.
    This was implemented in response to issue #885.
    """
    readers_without_header_start = (
        ascii.NoHeader,
        ascii.SExtractor,
        ascii.Ipac,
        ascii.BaseReader,
        ascii.FixedWidthNoHeader,
        ascii.Cds,
        ascii.Mrt,
        ascii.Daophot,
    )
    for reader_cls in readers_without_header_start:
        with pytest.raises(ValueError):
            ascii.core._get_reader(reader_cls, header_start=5)
def test_csv_table_read():
    """
    Check for a regression introduced by #1935. Pseudo-CSV file with
    commented header line.
    """
    # The '#' on the header line must be stripped during guessing.
    t = ascii.read(["# a, b", "1, 2", "3, 4"])
    assert t.colnames == ["a", "b"]
def test_overlapping_names(fast_reader):
    """
    Check that the names argument list can overlap with the existing column
    names. This tests the issue in #1991.
    """
    # Swapped names 'b', 'a' overlap the file's own 'a', 'b' header.
    t = ascii.read(["a b", "1 2"], names=["b", "a"], fast_reader=fast_reader)
    assert t.colnames == ["b", "a"]
def test_sextractor_units():
    """
    Make sure that the SExtractor reader correctly inputs descriptions and units.
    """
    table = ascii.read("data/sextractor2.dat", format="sextractor", guess=False)
    expected_units = [
        None,
        Unit("pix"),
        Unit("pix"),
        Unit("mag"),
        Unit("mag"),
        None,
        Unit("pix**2"),
        Unit("m**(-6)"),
        Unit("mag * arcsec**(-2)"),
    ]
    expected_descrs = [
        "Running object number",
        "Windowed position estimate along x",
        "Windowed position estimate along y",
        "Kron-like elliptical aperture magnitude",
        "RMS error for AUTO magnitude",
        "Extraction flags",
        None,
        "Barycenter position along MAMA x axis",
        "Peak surface brightness above background",
    ]
    # Walk the columns in order against the parallel expectation lists.
    for col, unit, descr in zip(table.itercols(), expected_units, expected_descrs):
        assert col.unit == unit
        assert col.description == descr
def test_sextractor_last_column_array():
    """
    Make sure that the SExtractor reader handles the last column correctly
    when it is array-like.
    """
    table = ascii.read("data/sextractor3.dat", format="sextractor", guess=False)
    expected_columns = [
        "X_IMAGE",
        "Y_IMAGE",
        "ALPHA_J2000",
        "DELTA_J2000",
        "MAG_AUTO",
        "MAGERR_AUTO",
        "MAG_APER",
        "MAG_APER_1",
        "MAG_APER_2",
        "MAG_APER_3",
        "MAG_APER_4",
        "MAG_APER_5",
        "MAG_APER_6",
        "MAGERR_APER",
        "MAGERR_APER_1",
        "MAGERR_APER_2",
        "MAGERR_APER_3",
        "MAGERR_APER_4",
        "MAGERR_APER_5",
        "MAGERR_APER_6",
    ]
    # First four columns are positional; the remaining 16 are magnitudes.
    expected_units = [
        Unit("pix"),
        Unit("pix"),
        Unit("deg"),
        Unit("deg"),
    ] + [Unit("mag")] * 16
    expected_descrs = (
        [
            "Object position along x",
            None,
            "Right ascension of barycenter (J2000)",
            "Declination of barycenter (J2000)",
            "Kron-like elliptical aperture magnitude",
            "RMS error for AUTO magnitude",
        ]
        + ["Fixed aperture magnitude vector"] * 7
        + ["RMS error vector for fixed aperture mag."] * 7
    )
    for col, name, unit, descr in zip(
        table.itercols(), expected_columns, expected_units, expected_descrs
    ):
        assert col.name == name
        assert col.unit == unit
        assert col.description == descr
def test_list_with_newlines():
    """
    Check that lists of strings where some strings consist of just a
    newline character are parsed correctly.
    """
    t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
    # The newline-only entries are dropped entirely.
    assert t.colnames == ["abc"]
    assert len(t) == 2
    assert t["abc"][0] == 123
    assert t["abc"][1] == 456
def test_commented_csv():
    """
    Check that Csv reader does not have ignore lines with the # comment
    character which is defined for most Basic readers.
    """
    t = ascii.read(["#a,b", "1,2", "#3,4"], format="csv")
    # '#' is not a comment character in CSV: it stays in names and values.
    assert t.colnames == ["#a", "b"]
    assert len(t) == 2
    assert t["#a"][1] == "#3"
def test_meta_comments():
    """
    Make sure that line comments are included in the ``meta`` attribute
    of the output Table.
    """
    t = ascii.read(["#comment1", "# comment2 \t", "a,b,c", "1,2,3"])
    assert t.colnames == ["a", "b", "c"]
    # Leading '#' and surrounding whitespace are stripped from the stored
    # comment text.
    assert t.meta["comments"] == ["comment1", "comment2"]
def test_guess_fail():
    """
    Check the error message when guess fails
    """
    # Guess failure on a malformed table includes the debugging hint.
    with pytest.raises(ascii.InconsistentTableError) as err:
        ascii.read("asfdasdf\n1 2 3", format="basic")
    assert "** To figure out why the table did not read, use guess=False and" in str(
        err.value
    )

    # Guessing enabled, but the format has no free parameters.
    with pytest.raises(ValueError) as err:
        ascii.read("asfdasdf\n1 2 3", format="ipac")
    assert (
        "At least one header line beginning and ending with delimiter required"
        in str(err.value)
    )

    # Guessing enabled, but all parameters are explicitly specified.
    with pytest.raises(ValueError) as err:
        ascii.read(
            "asfdasdf\n1 2 3",
            format="basic",
            quotechar='"',
            delimiter=" ",
            fast_reader=False,
        )
    assert "Number of header columns (1) inconsistent with data columns (3)" in str(
        err.value
    )
def test_guessing_file_object():
    """
    Test guessing a file object. Fixes #3013 and similar issue noted in #3019.
    """
    # Pass an open binary file handle (bz2-compressed) directly to read().
    with open("data/ipac.dat.bz2", "rb") as fh:
        tab = ascii.read(fh)
    assert tab.colnames == ["ra", "dec", "sai", "v2", "sptype"]
def test_pformat_roundtrip():
    """Check that the screen output of ``print tab`` can be read. See #3025.

    Reads a table containing whitespace-padded values, formats it with
    ``pformat`` and verifies that re-reading the formatted lines
    reproduces the same table.
    """
    # Stray duplicate docstring-string statement (copy-paste leftover from
    # test_default_missing) removed.
    table = "\n".join(  # noqa: FLY002
        [
            "a,b,c,d",
            "1,3,1.11,1",
            "2, 2, 4.0 , ss ",
        ]
    )
    dat = ascii.read(table)
    out = ascii.read(dat.pformat())
    assert len(dat) == len(out)
    assert dat.colnames == out.colnames
    for c in dat.colnames:
        assert np.all(dat[c] == out[c])
def test_almost_but_not_quite_daophot():
    """Regression test for #3319.
    This tables looks so close to a daophot table, that the daophot reader gets
    quite far before it fails with an AttributeError.
    Note that this table will actually be read as Commented Header table with
    the columns ['some', 'header', 'info'].
    """
    lines = [
        "# some header info",
        "#F header info beginning with 'F'",
        "1 2 3",
        "4 5 6",
        "7 8 9",
    ]
    dat = ascii.read(lines)
    # All three data rows survive the (non-daophot) guess.
    assert len(dat) == 3
def test_commented_header_comments(fast):
    """
    Test that comments in commented_header are as expected with header_start
    at different positions, and that the table round-trips.
    """
    comments = ["comment 1", "comment 2", "comment 3"]
    data_lines = ["1 2", "3 4"]

    def assert_read(lines, **kwargs):
        # Read with the given header_start and verify comments/colnames.
        dat = ascii.read(lines, format="commented_header", fast_reader=fast, **kwargs)
        assert dat.meta["comments"] == comments
        assert dat.colnames == ["a", "b"]
        return dat

    # Header is the first commented line (the default); also round-trip
    # through write and compare the emitted lines exactly.
    lines = ["# a b", "# comment 1", "# comment 2", "# comment 3", *data_lines]
    dat = assert_read(lines)
    out = StringIO()
    ascii.write(dat, out, format="commented_header", fast_writer=fast)
    assert out.getvalue().splitlines() == lines

    # Header is the second commented line.
    lines = ["# comment 1", "# a b", "# comment 2", "# comment 3", *data_lines]
    assert_read(lines, header_start=1)

    # Header is the third commented line, addressed both positively and
    # negatively (negative index counts within the commented lines).
    lines = ["# comment 1", "# comment 2", "# a b", "# comment 3", *data_lines]
    assert_read(lines, header_start=2)
    assert_read(lines, header_start=-2)

    # Header is the last commented line.
    lines = ["# comment 1", "# comment 2", "# comment 3", "# a b", *data_lines]
    assert_read(lines, header_start=-1)

    # No comment lines at all: meta must not grow a "comments" entry.
    dat = ascii.read(
        ["# a b", *data_lines], format="commented_header", fast_reader=fast
    )
    assert "comments" not in dat.meta
    assert dat.colnames == ["a", "b"]
def test_probably_html(home_is_data):
    """
    Test the routine for guessing if a table input to ascii.read is probably HTML
    """
    # Inputs that should be classified as HTML: .htm(l) paths/URLs and
    # strings/sequences containing doctype or table markup.
    html_like = (
        "data/html.html",
        "~/html.html",
        "http://blah.com/table.html",
        "https://blah.com/table.html",
        "file://blah/table.htm",
        "ftp://blah.com/table.html",
        "file://blah.com/table.htm",
        " <! doctype html > hello world",
        "junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk",
        [
            "junk < table baz>",
            " <tr foo >",
            " <td bar> ",
            "</td> </tr>",
            "</table> junk",
        ],
        (" <! doctype html > ", " hello world"),
    )
    # Near-misses that must NOT be classified as HTML: wrong extension,
    # malformed scheme, misspelled tags, or non-string content.
    not_html = (
        "data/html.htms",
        "Xhttp://blah.com/table.html",
        " https://blah.com/table.htm",
        "fole://blah/table.htm",
        " < doctype html > hello world",
        "junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk",
        [
            "junk < table baz>",
            " <t foo >",
            " <td bar> ",
            "</td> </tr>",
            "</table> junk",
        ],
        (" <! doctype htm > ", " hello world"),
        [[1, 2, 3]],
    )
    for sample in html_like:
        assert _probably_html(sample) is True
    for sample in not_html:
        assert _probably_html(sample) is False
def test_table_with_no_newline():
    """
    Test that an input file which is completely empty fails in the expected way.
    Test that an input file with one line but no newline succeeds.
    """
    # Completely empty input with guessing: every format guess fails.
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read(BytesIO())

    # Empty input, no guessing, pure-Python reader: explicit header error.
    with pytest.raises(ValueError) as exc_info:
        ascii.read(BytesIO(), guess=False, fast_reader=False, format="basic")
    assert "No header line found" in str(exc_info.value)

    # Empty input, no guessing, fast reader: an empty (zero-size) table.
    t = ascii.read(BytesIO(), guess=False, fast_reader=True, format="fast_basic")
    assert not t and t.as_array().size == 0

    # A single header line with no trailing newline must still parse into
    # column names with zero data rows, for guessed and explicit reads alike.
    for kwargs in (
        {},
        {"guess": False, "fast_reader": False, "format": "basic"},
        {"guess": False, "fast_reader": True, "format": "fast_basic"},
    ):
        stream = BytesIO()
        stream.write(b"a b")
        t = ascii.read(stream, **kwargs)
        assert t.colnames == ["a", "b"]
        assert len(t) == 0
def test_column_conversion_error():
    """
    Test that context information (upstream exception message) from column
    conversion error is provided.
    """
    # A single 'double' column whose data row cannot be converted to float.
    ipac = """\
| col0 |
| double |
1 2
"""
    with pytest.raises(ValueError) as exc_info:
        ascii.read(ipac, guess=False, format="ipac")
    assert "Column col0 failed to convert:" in str(exc_info.value)

    # An empty converter list for a column yields a "no converters" error.
    with pytest.raises(ValueError) as exc_info:
        ascii.read(["a b", "1 2"], guess=False, format="basic", converters={"a": []})
    assert "no converters" in str(exc_info.value)
def test_non_C_locale_with_fast_reader():
    """Test code that forces "C" locale while calling fast reader (#4364)"""
    saved_locale = locale.setlocale(locale.LC_ALL)
    # macOS names the locale differently from Linux.
    fr_locale = "fr_FR" if platform.system() == "Darwin" else "fr_FR.utf8"
    try:
        locale.setlocale(locale.LC_ALL, fr_locale)
        # Under fr_FR the decimal separator is ','; the fast reader must
        # still parse '1.5' as a float in every converter configuration.
        for fast_reader in (
            True,
            False,
            {"use_fast_converter": False},
            {"use_fast_converter": True},
        ):
            t = ascii.read(
                ["a b", "1.5 2"], format="basic", guess=False, fast_reader=fast_reader
            )
            assert t["a"].dtype.kind == "f"
    except locale.Error as e:
        # The French locale may not be installed on this machine.
        pytest.skip(f"Locale error: {e}")
    finally:
        locale.setlocale(locale.LC_ALL, saved_locale)
def test_no_units_for_char_columns():
    """Test that a char column of a Table is assigned no unit and not
    a dimensionless unit."""
    original = Table([["A"]], names="B")
    buf = StringIO()
    ascii.write(original, buf, format="ipac")
    # After an IPAC round trip the string column must have unit None,
    # not a dimensionless Unit('').
    roundtripped = ascii.read(buf.getvalue(), format="ipac", guess=False)
    assert roundtripped["B"].unit is None
def test_initial_column_fill_values():
    """Regression test for #5336, #5338."""

    class TestHeader(ascii.BasicHeader):
        def _set_cols_from_names(self):
            self.cols = [ascii.Column(name=name) for name in self.names]
            # Seed every column with an initial fill value so '--' entries
            # in the data are substituted (and therefore masked).
            for col in self.cols:
                col.fill_values = {"--": "0"}

    class Tester(ascii.Basic):
        header_class = TestHeader

    reader = ascii.get_reader(reader_cls=Tester)
    dat = reader.read(
        """# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """
    )
    # The '--' entry in column 'a' must come back masked.
    assert dat["a"][0] is np.ma.masked
def test_latex_no_trailing_backslash():
    """
    Test that latex/aastex file with no trailing backslash can be read.
    """
    # The final data row has no trailing '\\' and rows carry '%' comments;
    # an escaped '\%' must survive as literal data.
    text = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
    dat = ascii.read(text, format="latex")
    assert dat.colnames == ["a", "b", "c"]
    assert np.all(dat["a"] == ["1", r"3\%"])
    assert np.all(dat["c"] == ["c", "e"])
def test_read_chunks_input_types():
    """
    Test chunked reading for different input types: file path, file object,
    and string input.
    """
    fpath = "data/test5.dat"
    # Reference: the whole table read in one shot.
    expected = ascii.read(fpath, header_start=1, data_start=3)

    # Chunk-generator mode: each yielded chunk must have matching column
    # names/dtype kinds, and stacking all chunks reproduces the full table.
    with open(fpath) as fd1, open(fpath) as fd2:
        for source in (fpath, fd1, fd2.read()):
            chunk_iter = ascii.read(
                source,
                header_start=1,
                data_start=3,
                guess=False,
                format="fast_basic",
                fast_reader={"chunk_size": 400, "chunk_generator": True},
            )
            chunks = list(chunk_iter)
            for chunk in chunks:
                for col, ref_col in zip(
                    chunk.columns.values(), expected.columns.values()
                ):
                    assert col.name == ref_col.name
                    assert col.dtype.kind == ref_col.dtype.kind
            assert len(chunks) == 4
            stacked = table.vstack(chunks)
            assert np.all(expected == stacked)

    # Non-generator chunked mode: returns the fully assembled table directly.
    with open(fpath) as fd1, open(fpath) as fd2:
        for source in (fpath, fd1, fd2.read()):
            full = ascii.read(
                source, header_start=1, data_start=3, fast_reader={"chunk_size": 300}
            )
            assert np.all(expected == full)
Subsets and Splits