response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Test grouping a table with mixin columns
def test_group_mixins(): """ Test grouping a table with mixin columns """ # Setup mixins idx = np.arange(4) x = np.array([3.0, 1.0, 2.0, 1.0]) q = x * u.m lon = coordinates.Longitude(x * u.deg) lat = coordinates.Latitude(x * u.deg) # For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision) tm = time.Time(2000, format="jyear") + time.TimeDelta(x * 1e-10, format="sec") sc = coordinates.SkyCoord(ra=lon, dec=lat) aw = table_helpers.ArrayWrapper(x) nd = np.array([(3, "c"), (1, "a"), (2, "b"), (1, "a")], dtype="<i4,|S1").view( NdarrayMixin ) qt = QTable( [idx, x, q, lon, lat, tm, sc, aw, nd], names=["idx", "x", "q", "lon", "lat", "tm", "sc", "aw", "nd"], ) # Test group_by with each supported mixin type mixin_keys = ["x", "q", "lon", "lat", "tm", "sc", "aw", "nd"] for key in mixin_keys: qtg = qt.group_by(key) # Test that it got the sort order correct assert np.all(qtg["idx"] == [1, 3, 2, 0]) # Test that the groups are right # Note: skip testing SkyCoord column because that doesn't have equality for name in ["x", "q", "lon", "lat", "tm", "aw", "nd"]: assert np.all(qt[name][[1, 3]] == qtg.groups[0][name]) assert np.all(qt[name][[2]] == qtg.groups[1][name]) assert np.all(qt[name][[0]] == qtg.groups[2][name]) # Test that unique also works with mixins since most of the work is # done with group_by(). This is using *every* mixin as key. uqt = unique(qt, keys=mixin_keys) assert len(uqt) == 3 assert np.all(uqt["idx"] == [1, 2, 0]) assert np.all(uqt["x"] == [1.0, 2.0, 3.0]) # Column group_by() with mixins idxg = qt["idx"].group_by(qt[mixin_keys]) assert np.all(idxg == [1, 3, 2, 0])
Test that aggregating unsupported mixins produces a warning only
def test_group_mixins_unsupported(col):
    """Test that aggregating unsupported mixins produces a warning only"""
    # Two rows share key 'a' == 1, so aggregation is attempted on 'mix'.
    tbl = Table([[1, 1], [3, 4], col], names=["a", "b", "mix"])
    grouped = tbl.group_by("a")
    with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"):
        grouped.groups.aggregate(np.sum)
Test that group_by preserves the order of the table. This table has 5 groups with an average of 200 rows per group, so it is not statistically possible that the groups will be in order by chance. This tests explicitly the case where grouping is done via the index sort. See: https://github.com/astropy/astropy/issues/14882
def test_group_stable_sort(add_index):
    """Test that group_by preserves the order of the table.

    This table has 5 groups with an average of 200 rows per group, so it
    is not statistically possible that the groups will be in order by
    chance.  This tests explicitly the case where grouping is done via
    the index sort.  See: https://github.com/astropy/astropy/issues/14882
    """
    keys = np.random.randint(0, 5, 1000)
    order = np.arange(len(keys))
    tbl = Table([keys, order], names=["a", "b"])
    if add_index:
        tbl.add_index("a")
    grouped = tbl.group_by("a")
    # Within each group, the original row order ('b') must be preserved.
    for group in grouped.groups:
        assert np.all(group["b"] == np.sort(group["b"]))
Test the info() method of printing a summary of table column attributes
def test_table_info_attributes(table_types):
    """
    Test the info() method of printing a summary of table column attributes
    """
    a = np.array([1, 2, 3], dtype="int32")
    b = np.array([1, 2, 3], dtype="float32")
    c = np.array(["a", "c", "e"], dtype="|S1")
    t = table_types.Table([a, b, c], names=["a", "b", "c"])

    # Minimal output for a typical table
    tinfo = t.info(out=None)
    subcls = ["class"] if table_types.Table.__name__ == "MyTable" else []
    assert tinfo.colnames == [
        "name",
        "dtype",
        "shape",
        "unit",
        "format",
        "description",
        "class",
        "n_bad",
        "length",
    ]
    assert np.all(tinfo["name"] == ["a", "b", "c"])
    assert np.all(tinfo["dtype"] == ["int32", "float32", dtype_info_name("S1")])
    if subcls:
        assert np.all(tinfo["class"] == ["MyColumn"] * 3)

    # All output fields including a mixin column
    t["d"] = [1, 2, 3] * u.m
    t["d"].description = "quantity"
    t["a"].format = "%02d"
    t["e"] = time.Time([1, 2, 3], format="mjd")
    t["e"].info.description = "time"
    t["f"] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit="deg")
    t["f"].info.description = "skycoord"

    tinfo = t.info(out=None)
    assert np.all(tinfo["name"] == "a b c d e f".split())
    assert np.all(
        tinfo["dtype"]
        == ["int32", "float32", dtype_info_name("S1"), "float64", "object", "object"]
    )
    assert np.all(tinfo["unit"] == ["", "", "", "m", "", "deg,deg"])
    assert np.all(tinfo["format"] == ["%02d", "", "", "", "", ""])
    assert np.all(tinfo["description"] == ["", "", "", "quantity", "time", "skycoord"])
    cls = t.ColumnClass.__name__
    assert np.all(tinfo["class"] == [cls, cls, cls, cls, "Time", "SkyCoord"])

    # Test that repr(t.info) is same as t.info()
    out = StringIO()
    t.info(out=out)
    assert repr(t.info) == out.getvalue()
Test the info() method of printing a summary of table column statistics
def test_table_info_stats(table_types):
    """
    Test the info() method of printing a summary of table column statistics
    """
    a = np.array([1, 2, 1, 2], dtype="int32")
    b = np.array([1, 2, 1, 2], dtype="float32")
    c = np.array(["a", "c", "e", "f"], dtype="|S1")
    d = time.Time([1, 2, 1, 2], format="mjd", scale="tai")
    t = table_types.Table([a, b, c, d], names=["a", "b", "c", "d"])

    # option = 'stats'
    masked = "masked=True " if t.masked else ""
    out = StringIO()
    t.info("stats", out=out)
    table_header_line = f"<{t.__class__.__name__} {masked}length=4>"
    # NOTE(review): the whitespace alignment inside these expected strings
    # looks collapsed (single spaces) — verify against actual info() output.
    exp = [
        table_header_line,
        "name mean std min max",
        "---- ---- --- --- ---",
        " a 1.5 0.5 1 2",
        " b 1.5 0.5 1 2",
        " c -- -- -- --",
        " d 1.5 -- 1.0 2.0",
    ]
    assert out.getvalue().splitlines() == exp

    # option = ['attributes', 'stats']
    tinfo = t.info(["attributes", "stats"], out=None)
    assert tinfo.colnames == [
        "name",
        "dtype",
        "shape",
        "unit",
        "format",
        "description",
        "class",
        "mean",
        "std",
        "min",
        "max",
        "n_bad",
        "length",
    ]
    assert np.all(tinfo["mean"] == ["1.5", "1.5", "--", "1.5"])
    assert np.all(tinfo["std"] == ["0.5", "0.5", "--", "--"])
    assert np.all(tinfo["min"] == ["1", "1", "--", "1.0"])
    assert np.all(tinfo["max"] == ["2", "2", "--", "2.0"])

    out = StringIO()
    t.info("stats", out=out)
    exp = [
        table_header_line,
        "name mean std min max",
        "---- ---- --- --- ---",
        " a 1.5 0.5 1 2",
        " b 1.5 0.5 1 2",
        " c -- -- -- --",
        " d 1.5 -- 1.0 2.0",
    ]
    assert out.getvalue().splitlines() == exp

    # option = ['attributes', custom]
    custom = data_info_factory(
        names=["sum", "first"], funcs=[np.sum, lambda col: col[0]]
    )
    out = StringIO()
    tinfo = t.info(["attributes", custom], out=None)
    assert tinfo.colnames == [
        "name",
        "dtype",
        "shape",
        "unit",
        "format",
        "description",
        "class",
        "sum",
        "first",
        "n_bad",
        "length",
    ]
    assert np.all(tinfo["name"] == ["a", "b", "c", "d"])
    assert np.all(
        tinfo["dtype"] == ["int32", "float32", dtype_info_name("S1"), "object"]
    )
    assert np.all(tinfo["sum"] == ["6", "6", "--", "--"])
    assert np.all(tinfo["first"] == ["1", "1", "a", "1.0"])
Test getting info for just a column.
def test_data_info():
    """
    Test getting info for just a column.
    """
    cols = [
        table.Column(
            [1.0, 2.0, np.nan], name="name", description="description", unit="m/s"
        ),
        table.MaskedColumn(
            [1.0, 2.0, 3.0],
            name="name",
            description="description",
            unit="m/s",
            mask=[False, False, True],
        ),
    ]
    # Both the nan (plain Column) and the masked element (MaskedColumn)
    # count as a single "bad" value.
    for c in cols:
        # Test getting the full ordered dict
        cinfo = c.info(out=None)
        assert cinfo == OrderedDict(
            [
                ("name", "name"),
                ("dtype", "float64"),
                ("shape", ""),
                ("unit", "m / s"),
                ("format", ""),
                ("description", "description"),
                ("class", type(c).__name__),
                ("n_bad", 1),
                ("length", 3),
            ]
        )

        # Test the console (string) version which omits trivial values
        out = StringIO()
        c.info(out=out)
        exp = [
            "name = name",
            "dtype = float64",
            "unit = m / s",
            "description = description",
            f"class = {type(c).__name__}",
            "n_bad = 1",
            "length = 3",
        ]
        assert out.getvalue().splitlines() == exp

        # repr(c.info) gives the same as c.info()
        assert repr(c.info) == out.getvalue()

        # Test stats info
        cinfo = c.info("stats", out=None)
        assert cinfo == OrderedDict(
            [
                ("name", "name"),
                ("mean", "1.5"),
                ("std", "0.5"),
                ("min", "1"),
                ("max", "2"),
                ("n_bad", 1),
                ("length", 3),
            ]
        )
Make sure info works with scalar values
def test_scalar_info():
    """
    Make sure info works with scalar values
    """
    scalar_time = time.Time("2000:001")
    info = scalar_time.info(out=None)
    # A scalar has no bad values and, being dimensionless, no 'length' entry.
    assert info["n_bad"] == 0
    assert "length" not in info
Test that class info column is suppressed only for identical non-mixin columns.
def test_class_attribute():
    """
    Test that class info column is suppressed only for identical non-mixin
    columns.
    """
    vals = [[1] * u.m, [2] * u.m]

    # NOTE(review): whitespace alignment inside these expected strings looks
    # collapsed (single spaces) — verify against actual info() output.
    texp = [
        "<Table length=1>",
        "name dtype unit",
        "---- ------- ----",
        "col0 float64 m",
        "col1 float64 m",
    ]
    qexp = [
        "<QTable length=1>",
        "name dtype unit class ",
        "---- ------- ---- --------",
        "col0 float64 m Quantity",
        "col1 float64 m Quantity",
    ]
    # Table stores plain Columns (no 'class' column shown); QTable stores
    # Quantity mixins, so the 'class' column appears.
    for table_cls, exp in ((table.Table, texp), (table.QTable, qexp)):
        t = table_cls(vals)
        out = StringIO()
        t.info(out=out)
        assert out.getvalue().splitlines() == exp
Unit test of context manager to set info.serialize_method. Normally just used to set this for writing a Table to file (FITS, ECSV, HDF5).
def test_info_serialize_method():
    """
    Unit test of context manager to set info.serialize_method.  Normally just
    used to set this for writing a Table to file (FITS, ECSV, HDF5).
    """
    t = table.Table(
        {
            "tm": time.Time([1, 2], format="cxcsec"),
            "sc": coordinates.SkyCoord([1, 2], [1, 2], unit="deg"),
            "mc": table.MaskedColumn([1, 2], mask=[True, False]),
            "mc2": table.MaskedColumn([1, 2], mask=[True, False]),
        }
    )

    # Remember the original serialize_method dicts so we can verify they
    # are restored when the context manager exits.
    origs = {}
    for name in ("tm", "mc", "mc2"):
        origs[name] = deepcopy(t[name].info.serialize_method)

    # Test setting by name and getting back to originals
    with serialize_method_as(t, {"tm": "test_tm", "mc": "test_mc"}):
        for name in ("tm", "mc"):
            assert all(
                t[name].info.serialize_method[key] == "test_" + name
                for key in t[name].info.serialize_method
            )
        # Columns not named in the mapping are untouched
        assert t["mc2"].info.serialize_method == origs["mc2"]
        assert not hasattr(t["sc"].info, "serialize_method")

    for name in ("tm", "mc", "mc2"):
        assert t[name].info.serialize_method == origs[name]  # dict compare
    assert not hasattr(t["sc"].info, "serialize_method")

    # Test setting by name and class, where name takes precedence.  Also
    # test that it works for subclasses.
    with serialize_method_as(
        t, {"tm": "test_tm", "mc": "test_mc", table.Column: "test_mc2"}
    ):
        for name in ("tm", "mc", "mc2"):
            assert all(
                t[name].info.serialize_method[key] == "test_" + name
                for key in t[name].info.serialize_method
            )
        assert not hasattr(t["sc"].info, "serialize_method")

    for name in ("tm", "mc", "mc2"):
        assert t[name].info.serialize_method == origs[name]  # dict compare
    assert not hasattr(t["sc"].info, "serialize_method")

    # Test supplying a single string that all applies to all columns with
    # a serialize_method.
    with serialize_method_as(t, "test"):
        for name in ("tm", "mc", "mc2"):
            assert all(
                t[name].info.serialize_method[key] == "test"
                for key in t[name].info.serialize_method
            )
        assert not hasattr(t["sc"].info, "serialize_method")

    for name in ("tm", "mc", "mc2"):
        assert t[name].info.serialize_method == origs[name]  # dict compare
    assert not hasattr(t["sc"].info, "serialize_method")
Unit test of context manager to set info.serialize_method. Normally just used to set this for writing a Table to file (FITS, ECSV, HDF5).
def test_info_serialize_method_exception():
    """
    Unit test of context manager to set info.serialize_method.  Normally just
    used to set this for writing a Table to file (FITS, ECSV, HDF5).
    """
    t = simple_table(masked=True)
    original = deepcopy(t["a"].info.serialize_method)

    try:
        with serialize_method_as(t, "test"):
            methods = t["a"].info.serialize_method
            assert all(methods[key] == "test" for key in methods)
            raise ZeroDivisionError()
    except ZeroDivisionError:
        pass

    # serialize_method must be restored even when the body raised
    assert t["a"].info.serialize_method == original
Test fix for #10393
def test_init_table_with_names_and_structured_dtype(has_data):
    """Test fix for #10393"""
    arr = np.ones(2, dtype=np.dtype([("a", "i4"), ("b", "f4")]))
    args = (arr,) if has_data else ()
    t = Table(*args, names=["x", "y"], dtype=arr.dtype)
    # Structured dtype fields are renamed per `names`; dtypes carry over.
    assert t.colnames == ["x", "y"]
    assert str(t["x"].dtype) == "int32"
    assert str(t["y"].dtype) == "float32"
    expected_len = 2 if has_data else 0
    assert len(t) == expected_len
Test that initializing from an ndarray structured array with a multi-dim column works for both copy=False and True and that the referencing is as expected.
def test_init_and_ref_from_multidim_ndarray(table_type):
    """
    Test that initializing from an ndarray structured array with a multi-dim
    column works for both copy=False and True and that the referencing is as
    expected.
    """
    for make_copy in (False, True):
        nd = np.array(
            [(1, [10, 20]), (3, [30, 40])], dtype=[("a", "i8"), ("b", "i8", (2,))]
        )
        t = table_type(nd, copy=make_copy)
        assert t.colnames == ["a", "b"]
        assert t["a"].shape == (2,)
        assert t["b"].shape == (2, 2)

        # Mutate through the table, then check whether the source array
        # was shielded (copy) or shared (reference).
        t["a"][0] = -200
        t["b"][1][1] = -100
        if make_copy:
            assert nd["a"][0] == 1
            assert nd["b"][1][1] == 40
        else:
            assert nd["a"][0] == -200
            assert nd["b"][1][1] == -100
Test that initializing from a dict works for both copy=False and True and that the referencing is as expected.
def test_init_and_ref_from_dict(table_type, copy):
    """
    Test that initializing from a dict works for both copy=False and True and
    that the referencing is as expected.
    """
    arr1 = np.arange(10.0)
    arr2 = np.zeros(10)
    t = table_type({"x1": arr1, "x2": arr2}, copy=copy)

    assert set(t.colnames) == {"x1", "x2"}
    assert t["x1"].shape == (10,)
    assert t["x2"].shape == (10,)

    # Mutate through the table and check the effect on the input arrays.
    t["x1"][0] = -200
    t["x2"][1] = -100
    if copy:
        # Inputs untouched when data was copied
        assert arr1[0] == 0.0
        assert arr2[1] == 0.0
    else:
        # Columns reference the input arrays
        assert arr1[0] == -200
        assert arr2[1] == -100
Test fix for a problem introduced in #10636 (see https://github.com/astropy/astropy/pull/10636#issuecomment-676847515)
def test_add_none_object_column():
    """Test fix for a problem introduced in #10636 (see
    https://github.com/astropy/astropy/pull/10636#issuecomment-676847515)
    """
    t = Table(data={"a": [1, 2, 3]})
    # Assigning a bare None broadcasts into an object-dtype column of Nones
    t["b"] = None
    assert t["b"].dtype.kind == "O"
    assert all(entry is None for entry in t["b"])
Test fix for #11327
def test_init_Table_from_list_of_quantity():
    """Test fix for #11327"""
    # Variation on original example in #11327 at the Table level
    rows = [{"x": 5 * u.m, "y": 1 * u.m}, {"x": 10 * u.m, "y": 3}]
    t = Table(rows)

    # 'x' is a Quantity in every row: becomes a float column with unit m
    assert t["x"].unit is u.m
    assert t["x"].dtype.kind == "f"
    assert np.all(t["x"] == [5, 10])

    # 'y' mixes Quantity and plain int: falls back to object dtype, no unit
    assert t["y"].unit is None
    assert t["y"].dtype.kind == "O"
    assert t["y"][0] == 1 * u.m
    assert t["y"][1] == 3
Test fix for #14336 where providing units to QTable init fails. This applies when the input is a Quantity.
def test_init_QTable_and_set_units():
    """
    Test fix for #14336 where providing units to QTable init fails.
    This applies when the input is a Quantity.
    """
    qt = QTable([[1, 2] * u.km, [1, 2]], units={"col0": u.m, "col1": u.s})

    # Quantity input is converted from km to the requested m
    assert qt["col0"].unit == u.m
    assert np.all(qt["col0"].value == [1000, 2000])

    # Plain list just gets the unit attached
    assert qt["col1"].unit == u.s
    assert np.all(qt["col1"].value == [1, 2])
Test issue in #2997
def test_setting_from_masked_column():
    """Test issue in #2997"""
    mask_b = np.array([True, True, False, False])
    # Exercise both fancy (mask array) and slice selection
    for select in (mask_b, slice(0, 2)):
        t = Table(masked=True)
        t["a"] = Column([1, 2, 3, 4])
        t["b"] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
        t["c"] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False])

        # Assigning from a masked column should transfer data AND mask state
        t["b"][select] = t["c"][select]
        assert t["b"][1] == t[1]["b"]
        assert t["b"][0] is np.ma.masked  # Original state since t['c'][0] is masked
        assert t["b"][1] == 222  # New from t['c'] since t['c'][1] is unmasked
        assert t["b"][2] == 33
        assert t["b"][3] == 44
        assert np.all(
            t["b"].mask == t.mask["b"]
        )  # Avoid t.mask in general, this is for testing

        # Adding a new column must not disturb existing masks
        mask_before_add = t.mask.copy()
        t["d"] = np.arange(len(t))
        assert np.all(t.mask["b"] == mask_before_add["b"])
Test that masked column fill_value is coerced into the correct column type.
def test_coercing_fill_value_type():
    """
    Test that masked column fill_value is coerced into the correct column type.
    """
    # This is the original example posted on the astropy@scipy mailing list
    t = Table({"a": ["1"]}, masked=True)
    t["a"].set_fill_value("0")
    converted = Table(t, names=["a"], dtype=[np.int32])
    assert isinstance(converted["a"].fill_value, np.int32)

    # Same check directly at the column level
    col = MaskedColumn(["1"])
    col.set_fill_value("0")
    col_int = MaskedColumn(col, dtype=np.int32)
    assert isinstance(col_int.fill_value, np.int32)
Test that the mask is copied when copying a table (issue #7362).
def test_mask_copy():
    """Test that the mask is copied when copying a table (issue #7362)."""
    original = MaskedColumn([1, 2], mask=[False, True])
    duplicate = MaskedColumn(original, copy=True)

    # Mutating the copy's mask must not leak back into the original
    duplicate.mask[0] = True
    assert np.all(original.mask == [False, True])
    assert np.all(duplicate.mask == [True, True])
Test that as_array() and Table.mask attr work with masked mixin columns
def test_masked_as_array_with_mixin():
    """Test that as_array() and Table.mask attr work with masked mixin columns"""
    t = Table()
    t["a"] = Time([1, 2], format="cxcsec")
    t["b"] = [3, 4]
    t["c"] = [5, 6] * u.m

    # With no mask, the output should be ndarray
    ta = t.as_array()
    assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray)

    # With a mask, output is MaskedArray
    t["a"][1] = np.ma.masked
    ta = t.as_array()
    assert isinstance(ta, np.ma.MaskedArray)
    assert np.all(ta["a"].mask == [False, True])
    assert np.isclose(ta["a"][0].cxcsec, 1.0)
    assert not np.any(ta["b"].mask)
    assert not np.any(ta["c"].mask)

    # Check table ``mask`` property
    tm = t.mask
    assert np.all(tm["a"] == [False, True])
    assert not np.any(tm["b"])
    assert not np.any(tm["c"])
Test that adding a MaskedColumn with a unit to QTable creates a MaskedQuantity.
def test_masked_column_with_unit_in_qtable():
    """Test that adding a MaskedColumn with a unit to QTable creates a MaskedQuantity."""
    MaskedQuantity = Masked(u.Quantity)
    qt = QTable()

    # No unit: the column stays a plain MaskedColumn.
    qt["a"] = MaskedColumn([1, 2])
    assert isinstance(qt["a"], MaskedColumn)

    # A unit (no explicit mask) yields a MaskedQuantity with nothing masked.
    qt["b"] = MaskedColumn([1, 2], unit=u.m)
    assert isinstance(qt["b"], MaskedQuantity)
    assert not np.any(qt["b"].mask)

    # A unit plus an explicit mask round-trips the mask.
    qt["c"] = MaskedColumn([1, 2], unit=u.m, mask=[True, False])
    assert isinstance(qt["c"], MaskedQuantity)
    assert np.all(qt["c"].mask == [True, False])
Check that we don't finalize MaskedColumn too often. Regression test for gh-6721.
def test_mask_slicing_count_array_finalize(): """Check that we don't finalize MaskedColumn too often. Regression test for gh-6721. """ # Create a new BaseColumn class that counts how often # ``__array_finalize__`` is called. class MyBaseColumn(BaseColumn): counter = 0 def __array_finalize__(self, obj): super().__array_finalize__(obj) MyBaseColumn.counter += 1 # Base a new MaskedColumn class on it. The normal MaskedColumn # hardcodes the initialization to BaseColumn, so we exchange that. class MyMaskedColumn(MaskedColumn, Column, MyBaseColumn): def __new__(cls, *args, **kwargs): self = super().__new__(cls, *args, **kwargs) self._baseclass = MyBaseColumn return self # Creation really needs 2 finalizations (once for the BaseColumn # call inside ``__new__`` and once when the view as a MaskedColumn # is taken), but since the first is hardcoded, we do not capture it # and thus the count is only 1. c = MyMaskedColumn([1, 2], mask=[False, True]) assert MyBaseColumn.counter == 1 # slicing should need only one ``__array_finalize__`` (used to be 3). c0 = c[:] assert MyBaseColumn.counter == 2 # repr should need none (used to be 2!!) repr(c0) assert MyBaseColumn.counter == 2
Required attributes for a column can be set.
def test_attributes(mixin_cols):
    """
    Required attributes for a column can be set.
    """
    m = mixin_cols["m"]
    m.info.name = "a"
    assert m.info.name == "a"

    m.info.description = "a"
    assert m.info.description == "a"

    # Cannot set unit for these classes
    if isinstance(
        m,
        (
            u.Quantity,
            coordinates.SkyCoord,
            time.Time,
            time.TimeDelta,
            coordinates.BaseRepresentationOrDifferential,
            coordinates.StokesCoord,
        ),
    ):
        with pytest.raises(AttributeError):
            m.info.unit = u.m
    else:
        m.info.unit = u.m
        assert m.info.unit is u.m

    m.info.format = "a"
    assert m.info.format == "a"

    m.info.meta = {"a": 1}
    assert m.info.meta == {"a": 1}

    # info only allows its known set of attributes, both for set and get
    with pytest.raises(AttributeError):
        m.info.bad_attr = 1

    with pytest.raises(AttributeError):
        m.info.bad_attr
Make a table with the columns in mixin_cols, which is an ordered dict of three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
def test_make_table(table_types, mixin_cols):
    """
    Make a table with the columns in mixin_cols, which is an ordered dict of
    three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
    """
    # Init directly from the dict (names taken from the dict keys)
    tbl = table_types.Table(mixin_cols)
    check_mixin_type(tbl, tbl["m"], mixin_cols["m"])

    cols = list(mixin_cols.values())

    # Init from a list of columns with explicit names
    tbl = table_types.Table(cols, names=("i", "a", "b", "m"))
    check_mixin_type(tbl, tbl["m"], mixin_cols["m"])

    # Init without names: auto-generated col<N> names, mixin ends up as col3
    tbl = table_types.Table(cols)
    check_mixin_type(tbl, tbl["col3"], mixin_cols["m"])
Test that table with mixin column can be written by io.ascii for every pure Python writer. No validation of the output is done, this just confirms no exceptions.
def test_io_ascii_write():
    """
    Test that table with mixin column can be written by io.ascii for every
    pure Python writer.  No validation of the output is done, this just
    confirms no exceptions.
    """
    from astropy.io.ascii.connect import _get_connectors_table

    tbl = QTable(MIXIN_COLS)
    for connector in _get_connectors_table():
        # Skip read-only formats and the C-based fast writers
        if not connector["Write"] or ".fast_" in connector["Format"]:
            continue
        buf = StringIO()
        tbl.write(buf, format=connector["Format"])
Test that table with Quantity mixin column can be round-tripped by io.votable. Note that FITS and HDF5 mixin support are tested (much more thoroughly) in their respective subpackage tests (io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
def test_votable_quantity_write(tmp_path):
    """
    Test that table with Quantity mixin column can be round-tripped by
    io.votable.  Note that FITS and HDF5 mixin support are tested (much more
    thoroughly) in their respective subpackage tests
    (io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
    """
    original = QTable()
    original["a"] = u.Quantity([1, 2, 4], unit="nm")

    path = tmp_path / "table-tmp"
    original.write(path, format="votable", overwrite=True)
    roundtripped = QTable.read(path, format="votable")

    # The unit must survive the VOTable round trip
    assert isinstance(roundtripped["a"], u.Quantity)
    assert roundtripped["a"].unit == "nm"
Test that table with Time mixin columns can be written by io.fits. Validation of the output is done. Test that io.fits writes a table containing Time mixin columns that can be partially round-tripped (metadata scale, location). Note that we postpone checking the "local" scale, since that cannot be done with format 'cxcsec', as it requires an epoch.
def test_io_time_write_fits_standard(tmp_path, table_types):
    """
    Test that table with Time mixin columns can be written by io.fits.
    Validation of the output is done.  Test that io.fits writes a table
    containing Time mixin columns that can be partially round-tripped
    (metadata scale, location).

    Note that we postpone checking the "local" scale, since that cannot be
    done with format 'cxcsec', as it requires an epoch.
    """
    t = table_types([[1, 2], ["string", "column"]])
    # One 'a<scale>' (with location) and one 'b<scale>' (no location)
    # Time column per standard scale.
    for scale in time.STANDARD_TIME_SCALES:
        t["a" + scale] = time.Time(
            [[1, 2], [3, 4]],
            format="cxcsec",
            scale=scale,
            location=EarthLocation(-2446354, 4237210, 4077985, unit="m"),
        )
        t["b" + scale] = time.Time(
            ["1999-01-01T00:00:00.123456789", "2010-01-01T00:00:00"], scale=scale
        )
    t["c"] = [3.0, 4.0]

    filename = tmp_path / "table-tmp"

    # Show that FITS format succeeds
    with pytest.warns(AstropyUserWarning) as record:
        t.write(filename, format="fits", overwrite=True)
    # The exact sequence probably does not matter too much, so we'll just try
    # to match the *set* of warnings
    warnings = {wm.message.args[0] for wm in record}
    expected = {
        (
            'Time Column "btai" has no specified location, '
            "but global Time Position is present, "
            "which will be the default for this column in FITS specification."
        ),
        (
            'Time Column "btdb" has no specified location, '
            "but global Time Position is present, "
            "which will be the default for this column in FITS specification."
        ),
        (
            'Time Column "btcg" has no specified location, '
            "but global Time Position is present, "
            "which will be the default for this column in FITS specification."
        ),
        (
            'Time Column "btt" has no specified location, '
            "but global Time Position is present, which will be the default "
            "for this column in FITS specification."
        ),
        (
            'Time Column "butc" has no specified location, '
            "but global Time Position is present, "
            "which will be the default for this column in FITS specification."
        ),
        (
            'Earth Location "TOPOCENTER" for Time Column "atdb"'
            ' is incompatible with scale "TDB".'
        ),
        (
            'Earth Location "TOPOCENTER" for Time Column "atcb"'
            ' is incompatible with scale "TCB".'
        ),
        (
            'Time Column "but1" has no specified location, '
            "but global Time Position is present, "
            "which will be the default for this column in FITS specification."
        ),
        (
            'Time Column "btcb" has no specified location, '
            "but global Time Position is present, "
            "which will be the default for this column in FITS specification."
        ),
    }
    assert warnings == expected, f"Got some unexpected warnings\n{warnings - expected}"

    with pytest.warns(
        AstropyUserWarning,
        match='Time column reference position "TRPOSn" is not specified',
    ):
        tm = table_types.read(filename, format="fits", astropy_native=True)

    for scale in time.STANDARD_TIME_SCALES:
        for ab in ("a", "b"):
            name = ab + scale
            # Assert that the time columns are read as Time
            assert isinstance(tm[name], time.Time)
            # Assert that the scales round-trip
            assert tm[name].scale == t[name].scale
            # Assert that the format is jd
            assert tm[name].format == "jd"
            # Assert that the location round-trips
            assert tm[name].location == t[name].location
            # Finally assert that the column data round-trips
            assert (tm[name] == t[name]).all()

    for name in ("col0", "col1", "c"):
        # Assert that the non-time columns are read as Column
        assert isinstance(tm[name], Column)
        # Assert that the non-time columns' data round-trips
        assert (tm[name] == t[name]).all()

    # Test for conversion of time data to its value, as defined by its format
    for scale in time.STANDARD_TIME_SCALES:
        for ab in ("a", "b"):
            name = ab + scale
            t[name].info.serialize_method["fits"] = "formatted_value"

    t.write(filename, format="fits", overwrite=True)
    tm = table_types.read(filename, format="fits")

    for scale in time.STANDARD_TIME_SCALES:
        for ab in ("a", "b"):
            name = ab + scale
            # With 'formatted_value' serialization the columns come back as
            # plain values, not Time objects.
            assert not isinstance(tm[name], time.Time)
            assert (tm[name] == t[name].value).all()
Test that table with a Time mixin with scale local can also be written by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding ``cxcsec`` format, which requires an epoch and thus cannot be used for a local time scale.
def test_io_time_write_fits_local(tmp_path, table_types):
    """
    Test that table with a Time mixin with scale local can also be written
    by io.fits.  Like ``test_io_time_write_fits_standard`` above, but avoiding
    ``cxcsec`` format, which requires an epoch and thus cannot be used for a
    local time scale.
    """
    t = table_types([[1, 2], ["string", "column"]])
    t["a_local"] = time.Time(
        [[50001, 50002], [50003, 50004]],
        format="mjd",
        scale="local",
        location=EarthLocation(-2446354, 4237210, 4077985, unit="m"),
    )
    t["b_local"] = time.Time(
        ["1999-01-01T00:00:00.123456789", "2010-01-01T00:00:00"], scale="local"
    )
    t["c"] = [3.0, 4.0]

    filename = tmp_path / "table-tmp"

    # Show that FITS format succeeds
    with pytest.warns(
        AstropyUserWarning, match='Time Column "b_local" has no specified location'
    ):
        t.write(filename, format="fits", overwrite=True)

    with pytest.warns(
        AstropyUserWarning,
        match='Time column reference position "TRPOSn" is not specified.',
    ):
        tm = table_types.read(filename, format="fits", astropy_native=True)

    for ab in ("a", "b"):
        name = ab + "_local"
        # Assert that the time columns are read as Time
        assert isinstance(tm[name], time.Time)
        # Assert that the scales round-trip
        assert tm[name].scale == t[name].scale
        # Assert that the format is jd
        assert tm[name].format == "jd"
        # Assert that the location round-trips
        assert tm[name].location == t[name].location
        # Finally assert that the column data round-trips
        assert (tm[name] == t[name]).all()

    for name in ("col0", "col1", "c"):
        # Assert that the non-time columns are read as Column
        assert isinstance(tm[name], Column)
        # Assert that the non-time columns' data round-trips
        assert (tm[name] == t[name]).all()

    # Test for conversion of time data to its value, as defined by its format.
    for ab in ("a", "b"):
        name = ab + "_local"
        t[name].info.serialize_method["fits"] = "formatted_value"

    t.write(filename, format="fits", overwrite=True)
    tm = table_types.read(filename, format="fits")

    for ab in ("a", "b"):
        name = ab + "_local"
        # With 'formatted_value' serialization the columns come back as
        # plain values, not Time objects.
        assert not isinstance(tm[name], time.Time)
        assert (tm[name] == t[name].value).all()
Test that table with mixin columns (excluding Quantity) cannot be written by io.votable.
def test_votable_mixin_write_fail(mixin_cols):
    """
    Test that table with mixin columns (excluding Quantity) cannot be written by
    io.votable.
    """
    t = QTable(mixin_cols)
    # Only do this test if there are unsupported column types (i.e. anything
    # besides BaseColumn and Quantity class instances).
    if not t.columns.not_isinstance((BaseColumn, u.Quantity)):
        pytest.skip("no unsupported column types")

    buf = StringIO()
    with pytest.raises(ValueError) as err:
        t.write(buf, format="votable")
    assert "cannot write table with mixin column(s)" in str(err.value)
Join tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin.
def test_join(table_types):
    """
    Join tables with mixin cols.  Use column "i" as proxy for what the result
    should be for each mixin.
    """
    t1 = table_types.Table()
    t1["a"] = table_types.Column(["a", "b", "b", "c"])
    t1["i"] = table_types.Column([0, 1, 2, 3])
    for name, col in MIXIN_COLS.items():
        t1[name] = col

    t2 = table_types.Table(t1)
    t2["a"] = ["b", "c", "a", "d"]
    # Distinct descriptions so we can verify attribute propagation
    for name, col in MIXIN_COLS.items():
        t1[name].info.description = name
        t2[name].info.description = name + "2"

    for join_type in ("inner", "left"):
        t12 = join(t1, t2, keys="a", join_type=join_type)
        idx1 = t12["i_1"]
        idx2 = t12["i_2"]
        for name, col in MIXIN_COLS.items():
            name1 = name + "_1"
            name2 = name + "_2"
            assert_table_name_col_equal(t12, name1, col[idx1])
            assert_table_name_col_equal(t12, name2, col[idx2])
            assert t12[name1].info.description == name
            assert t12[name2].info.description == name + "2"

    # Outer/right joins need maskable mixin columns, which is not supported
    for join_type in ("outer", "right"):
        with pytest.raises(NotImplementedError) as exc:
            t12 = join(t1, t2, keys="a", join_type=join_type)
        assert "join requires masking column" in str(exc.value)

    with pytest.raises(TypeError) as exc:
        t12 = join(t1, t2, keys=["a", "skycoord"])
    assert "one or more key columns are not sortable" in str(exc.value)

    # Join does work for a mixin which is a subclass of np.ndarray
    with pytest.warns(
        MergeConflictWarning,
        match="In merged column 'quantity' the 'description' attribute does not match",
    ):
        t12 = join(t1, t2, keys=["quantity"])
    assert np.all(t12["a_1"] == t1["a"])
Hstack tables with mixin cols. Use column "i" as proxy for what the result should be for each mixin.
def test_hstack(table_types):
    """
    Hstack tables with mixin cols.  Use column "i" as proxy for what the result
    should be for each mixin.
    """
    t1 = table_types.Table()
    t1["i"] = table_types.Column([0, 1, 2, 3])
    for name, col in MIXIN_COLS.items():
        t1[name] = col
        t1[name].info.description = name
        t1[name].info.meta = {"a": 1}

    for join_type in ("inner", "outer"):
        for chop in (True, False):
            t2 = table_types.Table(t1)
            if chop:
                t2 = t2[:-1]
                if join_type == "outer":
                    # Outer hstack with unequal lengths needs maskable
                    # mixin columns, which is not supported
                    with pytest.raises(NotImplementedError) as exc:
                        t12 = hstack([t1, t2], join_type=join_type)
                    assert "hstack requires masking column" in str(exc.value)
                    continue

            t12 = hstack([t1, t2], join_type=join_type)
            idx1 = t12["i_1"]
            idx2 = t12["i_2"]
            for name, col in MIXIN_COLS.items():
                name1 = name + "_1"
                name2 = name + "_2"
                assert_table_name_col_equal(t12, name1, col[idx1])
                assert_table_name_col_equal(t12, name2, col[idx2])
                # Info attributes must carry over from the stacked inputs
                for attr in ("description", "meta"):
                    assert getattr(t1[name].info, attr) == getattr(
                        t12[name1].info, attr
                    )
                    assert getattr(t2[name].info, attr) == getattr(
                        t12[name2].info, attr
                    )
Assert all(t[name] == col), with special handling for known mixin cols.
def assert_table_name_col_equal(t, name, col):
    """
    Assert all(t[name] == col), with special handling for known mixin cols.
    """
    if isinstance(col, coordinates.SkyCoord):
        # SkyCoord has no element-wise == ; compare ra/dec components.
        assert np.all(t[name].ra == col.ra)
        assert np.all(t[name].dec == col.dec)
    elif isinstance(col, coordinates.BaseRepresentationOrDifferential):
        assert np.all(representation_equal(t[name], col))
    elif isinstance(col, u.Quantity):
        if type(t) is QTable:
            # Only QTable keeps Quantity columns as Quantity; for a plain
            # Table no comparison is made here.
            assert np.all(t[name] == col)
    elif isinstance(col, table_helpers.ArrayWrapper):
        assert np.all(t[name].data == col.data)
    else:
        assert np.all(t[name] == col)
Test that slicing / indexing table gives right values and col attrs inherit
def test_get_items(mixin_cols):
    """
    Test that slicing / indexing table gives right values and col attrs inherit
    """
    attrs = ("name", "unit", "dtype", "format", "description", "meta")
    m = mixin_cols["m"]
    m.info.name = "m"
    m.info.format = "{0}"
    m.info.description = "d"
    m.info.meta = {"a": 1}
    t = QTable([m])
    # Cover list, ndarray and slice indexing.
    for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
        t2 = t[item]
        m2 = m[item]
        assert_table_name_col_equal(t2, "m", m[item])
        for attr in attrs:
            assert getattr(t2["m"].info, attr) == getattr(m.info, attr)
            assert getattr(m2.info, attr) == getattr(m.info, attr)
Test copy, pickle, and init from class roundtrip preserve info. This tests not only the mixin classes but a regular column as well.
def test_info_preserved_pickle_copy_init(mixin_cols):
    """
    Test copy, pickle, and init from class roundtrip preserve info.  This
    tests not only the mixin classes but a regular column as well.
    """

    def pickle_roundtrip(c):
        return pickle.loads(pickle.dumps(c))

    def init_from_class(c):
        return c.__class__(c)

    attrs = ("name", "unit", "dtype", "format", "description", "meta")
    for colname in ("i", "m"):
        m = mixin_cols[colname]
        m.info.name = colname
        m.info.format = "{0}"
        m.info.description = "d"
        m.info.meta = {"a": 1}
        for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
            m2 = func(m)
            for attr in attrs:
                # non-native byteorder not preserved by last 2 func, _except_ for structured dtype
                if (
                    attr != "dtype"
                    or getattr(m.info.dtype, "isnative", True)
                    or m.info.dtype.name.startswith("void")
                    or func in (copy.copy, copy.deepcopy)
                ):
                    original = getattr(m.info, attr)
                else:
                    # func does not preserve byteorder, check against (native) type.
                    original = m.info.dtype.newbyteorder("=")
                assert getattr(m2.info, attr) == original
Check whether data attributes in col1 and col2 share memory. If copy=True, this should not be the case for any, while if copy=False, all should share memory.
def check_share_memory(col1, col2, copy):
    """Check whether data attributes in col1 and col2 share memory.

    If copy=True, this should not be the case for any, while
    if copy=False, all should share memory.
    """
    if isinstance(col1, SkyCoord):
        # For SkyCoord, .info does not access actual data by default,
        # but rather attributes like .ra, which are copies.
        map1 = col1.data.info._represent_as_dict()
        map2 = col2.data.info._represent_as_dict()
    else:
        map1 = col1.info._represent_as_dict()
        map2 = col2.info._represent_as_dict()

    # Check array attributes only (in principle, could iterate on, e.g.,
    # differentials in representations, but this is enough for table).
    shared = [
        np.may_share_memory(v1, v2)
        for (v1, v2) in zip(map1.values(), map2.values())
        if isinstance(v1, np.ndarray) and v1.shape
    ]
    if copy:
        assert not any(shared)
    else:
        assert all(shared)
Test that adding a column preserves values and attributes. For copy=True, the data should be independent; for copy=False, the data should be shared, but the instance independent.
def test_add_column(mixin_cols, copy):
    """
    Test that adding a column preserves values and attributes.

    For copy=True, the data should be independent; for copy=False, the data
    should be shared, but the instance independent.
    """
    attrs = ("name", "unit", "dtype", "format", "description", "meta")
    m = mixin_cols["m"]
    assert m.info.name is None

    # Make sure adding column in various ways doesn't touch info.
    t = QTable([m], names=["a"], copy=copy)
    assert m.info.name is None
    check_share_memory(m, t["a"], copy=copy)

    t["new"] = m
    assert m.info.name is None
    check_share_memory(m, t["new"], copy=True)

    m.info.name = "m"
    m.info.format = "{0}"
    m.info.description = "d"
    m.info.meta = {"a": 1}
    t = QTable([m], copy=copy)
    assert t.colnames == ["m"]
    check_share_memory(m, t["m"], copy=copy)

    t = QTable([m], names=["m1"], copy=copy)
    assert m.info.name == "m"
    assert t.colnames == ["m1"]
    check_share_memory(m, t["m1"], copy=copy)

    # Add columns m2, m3, m4 by two different methods and test expected equality
    t["m2"] = m
    check_share_memory(m, t["m2"], copy=True)
    m.info.name = "m3"
    t.add_columns([m], copy=copy)
    check_share_memory(m, t["m3"], copy=copy)
    for name in ("m2", "m3"):
        assert_table_name_col_equal(t, name, m)
        for attr in attrs:
            if attr != "name":
                assert getattr(t["m1"].info, attr) == getattr(t[name].info, attr)
    # Also check that one can set using a scalar.
    s = m[0]
    if type(s) is type(m) and "info" in s.__dict__:
        # We're not going to worry about testing classes for which scalars
        # are a different class than the real array, or where info is not copied.
        t["s"] = m[0]
        assert_table_name_col_equal(t, "s", m[0])
        check_share_memory(m, t["s"], copy=True)
        for attr in attrs:
            if attr != "name":
                assert getattr(t["m1"].info, attr) == getattr(t["s"].info, attr)

    # While we're at it, also check a length-1 table.
    t = QTable([m[1:2]], names=["m"], copy=copy)
    check_share_memory(m, t["m"], copy=copy)
    if type(s) is type(m) and "info" in s.__dict__:
        t["s"] = m[0]
        assert_table_name_col_equal(t, "s", m[0])
        for attr in attrs:
            if attr != "name":
                # BUG FIX: this table was built with names=["m"], so the
                # reference column is "m" (previously "m1" -> KeyError).
                assert getattr(t["m"].info, attr) == getattr(t["s"].info, attr)
Vstack tables with mixin cols.
def test_vstack():
    """Vstack of tables containing mixin columns raises NotImplementedError."""
    first = QTable(MIXIN_COLS)
    second = QTable(MIXIN_COLS)
    # Mixin columns cannot currently be vertically stacked.
    with pytest.raises(NotImplementedError):
        vstack([first, second])
Test inserting a row, which works for Column, Quantity, Time and SkyCoord.
def test_insert_row(mixin_cols):
    """
    Test inserting a row, which works for Column, Quantity, Time and SkyCoord.
    """
    t = QTable(mixin_cols)
    t0 = t.copy()
    t["m"].info.description = "d"
    # Expected index mapping after inserting a copy of the last row at pos 1.
    idxs = [0, -1, 1, 2, 3]
    if isinstance(
        t["m"], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)
    ):
        t.insert_row(1, t[-1])

        for name in t.colnames:
            col = t[name]
            if isinstance(col, coordinates.SkyCoord):
                assert skycoord_equal(col, t0[name][idxs])
            else:
                assert np.all(col == t0[name][idxs])

        assert t["m"].info.description == "d"
    else:
        # Other mixin types do not support row insertion.
        with pytest.raises(ValueError) as exc:
            t.insert_row(1, t[-1])
        assert "Unable to insert row" in str(exc.value)
Insert a row into a QTable with the wrong unit
def test_insert_row_bad_unit():
    """Inserting a row with an incompatible unit must raise ValueError."""
    tbl = QTable([[1] * u.m])
    with pytest.raises(ValueError) as excinfo:
        tbl.insert_row(0, (2 * u.m / u.s,))
    msg = str(excinfo.value)
    assert "'m / s' (speed/velocity) and 'm' (length) are not convertible" in msg
Test that converting to numpy array creates an object dtype and that each instance in the array has the expected type.
def test_convert_np_array(mixin_cols):
    """
    Test that converting to numpy array creates an object dtype and that
    each instance in the array has the expected type.
    """
    tbl = QTable(mixin_cols)
    arr = tbl.as_array()
    mixin = mixin_cols["m"]
    # Mixins without a dtype attribute are expected to land as object dtype.
    if hasattr(mixin, "dtype"):
        expected_kind = mixin.dtype.kind
    else:
        expected_kind = "O"
    assert arr["m"].dtype.kind == expected_kind
Test that assignment of an int, slice, and fancy index works. Along the way test that copying table works.
def test_assignment_and_copy():
    """
    Test that assignment of an int, slice, and fancy index works.
    Along the way test that copying table works.
    """
    for name in ("quantity", "arraywrap"):
        m = MIXIN_COLS[name]
        t0 = QTable([m], names=["m"])
        # (destination index, source index) pairs: int, slice, fancy index.
        for i0, i1 in (
            (1, 2),
            (slice(0, 2), slice(1, 3)),
            (np.array([1, 2]), np.array([2, 3])),
        ):
            t = t0.copy()
            t["m"][i0] = m[i1]
            if name == "arraywrap":
                assert np.all(t["m"].data[i0] == m.data[i1])
                assert np.all(t0["m"].data[i0] == m.data[i0])
                # Assert col is different in the copied table.
                assert np.all(t0["m"].data[i0] != t["m"].data[i0])
            else:
                assert np.all(t["m"][i0] == m[i1])
                assert np.all(t0["m"][i0] == m[i0])
                assert np.all(t0["m"][i0] != t["m"][i0])
Test that a table round trips from QTable => Table => QTable
def test_conversion_qtable_table():
    """
    Test that a table round trips from QTable => Table => QTable
    """
    qt = QTable(MIXIN_COLS)
    names = qt.colnames
    for name in names:
        qt[name].info.description = name

    t = Table(qt)
    for name in names:
        assert t[name].info.description == name
        if name == "quantity":
            # Quantity degrades to a plain Column (values + unit) in Table.
            assert np.all(t["quantity"] == qt["quantity"].value)
            assert np.all(t["quantity"].unit is qt["quantity"].unit)
            assert isinstance(t["quantity"], t.ColumnClass)
        else:
            assert_table_name_col_equal(t, name, qt[name])

    qt2 = QTable(qt)
    for name in names:
        assert qt2[name].info.description == name
        assert_table_name_col_equal(qt2, name, qt[name])
Test for mixin-related regression described in #3321.
def test_setitem_as_column_name():
    """Regression test for mixin-related issue #3321."""
    tbl = Table()
    tbl["a"] = ["x", "y"]
    # Assigning a scalar used to fail with KeyError; it must broadcast.
    tbl["b"] = "b"
    assert np.all(tbl["a"] == np.array(["x", "y"]))
    assert np.all(tbl["b"] == np.array(["b", "b"]))
Test that table representation of quantities does not have unit
def test_quantity_representation():
    """
    Test that table representation of quantities does not have unit
    """
    t = QTable([[1, 2] * u.m])
    # Unit appears as its own header row, not attached to the values.
    assert t.pformat() == [
        "col0",
        " m ",
        "----",
        " 1.0",
        " 2.0",
    ]
Test that Representations are represented correctly.
def test_representation_representation(): """ Test that Representations are represented correctly. """ # With no unit we get "None" in the unit row c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one) t = Table([c]) assert t.pformat() == [ " col0 ", "------------", "(0., 1., 0.)", ] c = coordinates.CartesianRepresentation([0], [1], [0], unit="m") t = Table([c]) assert t.pformat() == [ " col0 ", " m ", "------------", "(0., 1., 0.)", ] c = coordinates.SphericalRepresentation([10] * u.deg, [20] * u.deg, [1] * u.pc) t = Table([c]) assert t.pformat() == [ " col0 ", " deg, deg, pc ", "--------------", "(10., 20., 1.)", ] c = coordinates.UnitSphericalRepresentation([10] * u.deg, [20] * u.deg) t = Table([c]) assert t.pformat() == [ " col0 ", " deg ", "----------", "(10., 20.)", ] c = coordinates.SphericalCosLatDifferential( [10] * u.mas / u.yr, [2] * u.mas / u.yr, [10] * u.km / u.s ) t = Table([c]) assert t.pformat() == [ " col0 ", "mas / yr, mas / yr, km / s", "--------------------------", " (10., 2., 10.)", ]
Test that skycoord representation works, both in the way that the values are output and in changing the frame representation.
def test_skycoord_representation(): """ Test that skycoord representation works, both in the way that the values are output and in changing the frame representation. """ # With no unit we get "None" in the unit row c = coordinates.SkyCoord([0], [1], [0], representation_type="cartesian") t = Table([c]) assert t.pformat() == [ " col0 ", "None,None,None", "--------------", " 0.0,1.0,0.0", ] # Test that info works with a dynamically changed representation c = coordinates.SkyCoord([0], [1], [0], unit="m", representation_type="cartesian") t = Table([c]) assert t.pformat() == [ " col0 ", " m,m,m ", "-----------", "0.0,1.0,0.0", ] t["col0"].representation_type = "unitspherical" assert t.pformat() == [ " col0 ", "deg,deg ", "--------", "90.0,0.0", ] t["col0"].representation_type = "cylindrical" assert t.pformat() == [ " col0 ", " m,deg,m ", "------------", "1.0,90.0,0.0", ]
Test directly adding various forms of structured ndarray columns to a table. Adding as NdarrayMixin is expected to be somewhat unusual after #12644 (which provides full support for structured array Column's). This test shows that the end behavior is the same in both cases.
def test_ndarray_mixin(as_ndarray_mixin):
    """
    Test directly adding various forms of structured ndarray columns to a table.
    Adding as NdarrayMixin is expected to be somewhat unusual after #12644
    (which provides full support for structured array Column's). This test shows
    that the end behavior is the same in both cases.
    """
    a = np.array([(1, "a"), (2, "b"), (3, "c"), (4, "d")], dtype="<i4,|U1")
    b = np.array(
        [(10, "aa"), (20, "bb"), (30, "cc"), (40, "dd")],
        dtype=[("x", "i4"), ("y", "U2")],
    )
    c = np.rec.fromrecords(
        [(100.0, "raa"), (200.0, "rbb"), (300.0, "rcc"), (400.0, "rdd")],
        names=["rx", "ry"],
    )
    d = np.arange(8, dtype="i8").reshape(4, 2)

    if as_ndarray_mixin:
        a = a.view(NdarrayMixin)
        b = b.view(NdarrayMixin)
        c = c.view(NdarrayMixin)
        d = d.view(NdarrayMixin)
        class_exp = NdarrayMixin
    else:
        class_exp = Column

    # Add one during initialization and the next as a new column.
    t = Table([a], names=["a"])
    t["b"] = b
    t["c"] = c
    t["d"] = d

    assert isinstance(t["a"], class_exp)
    assert t["a"][1][1] == a[1][1]
    assert t["a"][2][0] == a[2][0]

    assert t[1]["a"][1] == a[1][1]
    assert t[2]["a"][0] == a[2][0]

    assert isinstance(t["b"], class_exp)
    assert t["b"][1]["x"] == b[1]["x"]
    assert t["b"][1]["y"] == b[1]["y"]

    assert t[1]["b"]["x"] == b[1]["x"]
    assert t[1]["b"]["y"] == b[1]["y"]

    assert isinstance(t["c"], class_exp)
    assert t["c"][1]["rx"] == c[1]["rx"]
    assert t["c"][1]["ry"] == c[1]["ry"]

    assert t[1]["c"]["rx"] == c[1]["rx"]
    assert t[1]["c"]["ry"] == c[1]["ry"]

    assert isinstance(t["d"], class_exp)
    assert t["d"][1][0] == d[1][0]
    assert t["d"][1][1] == d[1][1]

    assert t[1]["d"][0] == d[1][0]
    assert t[1]["d"][1] == d[1][1]

    assert t.pformat(show_dtype=True) == [
        "  a [f0, f1]     b [x, y]      c [rx, ry]      d    ",
        "(int32, str1) (int32, str2) (float64, str3) int64[2]",
        "------------- ------------- --------------- --------",
        "     (1, 'a')    (10, 'aa')   (100., 'raa')   0 .. 1",
        "     (2, 'b')    (20, 'bb')   (200., 'rbb')   2 .. 3",
        "     (3, 'c')    (30, 'cc')   (300., 'rcc')   4 .. 5",
        "     (4, 'd')    (40, 'dd')   (400., 'rdd')   6 .. 7",
    ]
The QuantityInfo info class for Quantity implements a possible_string_format_functions() method that overrides the standard pprint._possible_string_format_functions() function. Test this.
def test_possible_string_format_functions():
    """
    The QuantityInfo info class for Quantity implements a
    possible_string_format_functions() method that overrides the
    standard pprint._possible_string_format_functions() function.
    Test this.
    """
    t = QTable([[1, 2] * u.m])

    # Old-style %-format spec.
    t["col0"].info.format = "%.3f"
    assert t.pformat() == [
        " col0",
        "  m  ",
        "-----",
        "1.000",
        "2.000",
    ]

    # New-style str.format spec with surrounding text.
    t["col0"].info.format = "hi {:.3f}"
    assert t.pformat() == [
        "  col0  ",
        "   m    ",
        "--------",
        "hi 1.000",
        "hi 2.000",
    ]

    # Bare format spec (no '%' or '{}').
    t["col0"].info.format = ".4f"
    assert t.pformat() == [
        " col0 ",
        "  m   ",
        "------",
        "1.0000",
        "2.0000",
    ]
Rename a mixin column.
def test_rename_mixin_columns(mixin_cols):
    """
    Rename a mixin column.
    """
    t = QTable(mixin_cols)
    tc = t.copy()
    t.rename_column("m", "mm")
    assert t.colnames == ["i", "a", "b", "mm"]
    # Equality check depends on the mixin type (see assert_table_name_col_equal).
    if isinstance(t["mm"], table_helpers.ArrayWrapper):
        assert np.all(t["mm"].data == tc["m"].data)
    elif isinstance(t["mm"], coordinates.SkyCoord):
        assert np.all(t["mm"].ra == tc["m"].ra)
        assert np.all(t["mm"].dec == tc["m"].dec)
    elif isinstance(t["mm"], coordinates.BaseRepresentationOrDifferential):
        assert np.all(representation_equal(t["mm"], tc["m"]))
    else:
        assert np.all(t["mm"] == tc["m"])
If the unit is invalid for a column that gets serialized this would cause an exception. Fixed in #7481.
def test_represent_mixins_as_columns_unit_fix():
    """
    An invalid unit on a masked column being serialized used to raise an
    exception; fixed in #7481.  Success is simply "does not raise".
    """
    tbl = Table({"a": [1, 2]}, masked=True)
    col = tbl["a"]
    col.unit = "not a valid unit"
    col.mask[1] = True
    serialize.represent_mixins_as_columns(tbl)
If the mixin defines a primary data column, that should get the description, format, etc., so no __info__ should be needed.
def test_primary_data_column_gets_description():
    """
    If the mixin defines a primary data column, that should get the
    description, format, etc., so no __info__ should be needed.
    """
    t = QTable({"a": [1, 2] * u.m})
    t["a"].info.description = "parrot"
    t["a"].info.format = "7.2f"

    tser = serialize.represent_mixins_as_columns(t)
    # Attributes live directly on the serialized column, not in __info__.
    assert "__info__" not in tser.meta["__serialized_columns__"]["a"]
    assert tser["a"].format == "7.2f"
    assert tser["a"].description == "parrot"
If a mixin input to a table has no info, it should stay that way. This since having 'info' slows down slicing, etc. See gh-11066.
def test_ensure_input_info_is_unchanged(table_cls, copy):
    """If a mixin input to a table has no info, it should stay that way.

    This since having 'info' slows down slicing, etc.
    See gh-11066.
    """
    q = [1, 2] * u.m
    assert "info" not in q.__dict__

    # None of these construction paths should instantiate q.info.
    t = table_cls([q], names=["q"], copy=copy)
    assert "info" not in q.__dict__

    t = table_cls([q], copy=copy)
    assert "info" not in q.__dict__

    t = table_cls({"q": q}, copy=copy)
    assert "info" not in q.__dict__

    t["q2"] = q
    assert "info" not in q.__dict__

    sc = SkyCoord([1, 2], [2, 3], unit="deg")
    t["sc"] = sc
    assert "info" not in sc.__dict__
Make a mixin column class that does not trigger the machinery to generate a pure column representation
def test_bad_info_class():
    """Make a mixin column class that does not trigger the machinery to generate
    a pure column representation"""

    class MyArrayWrapper(ArrayWrapper):
        # Plain ParentDtypeInfo lacks the serialization hooks, so this class
        # cannot be represented as Column subclasses.
        info = ParentDtypeInfo()

    t = Table()
    t["tm"] = MyArrayWrapper([0, 1, 2])
    # Removed unused "out = StringIO()" local left over from an earlier version.
    match = (
        r"failed to represent column 'tm' \(MyArrayWrapper\) as one or more Column"
        r" subclasses"
    )
    with pytest.raises(TypeError, match=match):
        represent_mixins_as_columns(t)
Test that allowed combinations are those expected.
def test_common_dtype():
    """
    Test that allowed combinations are those expected.
    """
    dtype = [
        ("int", int),
        ("uint8", np.uint8),
        ("float32", np.float32),
        ("float64", np.float64),
        ("str", "S2"),
        ("uni", "U2"),
        ("bool", bool),
        ("object", np.object_),
    ]
    arr = np.empty(1, dtype=dtype)
    fail = set()
    succeed = set()
    # Try every pairwise combination and record which merge and which raise.
    for name1, type1 in dtype:
        for name2, type2 in dtype:
            try:
                np_utils.common_dtype([arr[name1], arr[name2]])
                succeed.add(f"{name1} {name2}")
            except np_utils.TableMergeError:
                fail.add(f"{name1} {name2}")

    # known bad combinations
    bad = {
        "str int",
        "str bool",
        "uint8 bool",
        "uint8 str",
        "object float32",
        "bool object",
        "uni uint8",
        "int str",
        "bool str",
        "bool float64",
        "bool uni",
        "str float32",
        "uni float64",
        "uni object",
        "bool uint8",
        "object float64",
        "float32 bool",
        "str uint8",
        "uni bool",
        "float64 bool",
        "float64 object",
        "int bool",
        "uni int",
        "uint8 object",
        "int uni",
        "uint8 uni",
        "float32 uni",
        "object uni",
        "bool float32",
        "uni float32",
        "object str",
        "int object",
        "str float64",
        "object int",
        "float64 uni",
        "bool int",
        "object bool",
        "object uint8",
        "float32 object",
        "str object",
        "float64 str",
        "float32 str",
    }
    assert fail == bad

    good = {
        "float64 int",
        "int int",
        "uint8 float64",
        "uint8 int",
        "str uni",
        "float32 float32",
        "float64 float64",
        "float64 uint8",
        "float64 float32",
        "int uint8",
        "int float32",
        "uni str",
        "int float64",
        "uint8 float32",
        "float32 int",
        "float32 uint8",
        "bool bool",
        "uint8 uint8",
        "str str",
        "float32 float64",
        "object object",
        "uni uni",
    }
    assert succeed == good
Check that col.mask == exp_mask
def check_mask(col, exp_mask):
    """Return whether col.mask matches exp_mask."""
    if not hasattr(col, "mask"):
        # Unmasked column: OK only when no mask values were expected at all
        # (i.e. no auto-conversion to MaskedQuantity if it was not required
        # by the join).
        return np.all(exp_mask == False)  # noqa: E712
    # Coerce expected mask into dtype of col.mask; needed in particular for
    # types like EarthLocation where the mask is a structured array.
    expected = np.array(exp_mask).astype(col.mask.dtype)
    return np.all(col.mask == expected)
Test the keep_order argument for table.join. See https://github.com/astropy/astropy/issues/11619. This defines a left and right table which have an ``id`` column that is not sorted and not unique. Each table has common and unique ``id`` key values along with an ``order`` column to keep track of the original order.
def test_join_keep_sort_order(join_type):
    """Test the keep_order argument for table.join.

    See https://github.com/astropy/astropy/issues/11619.

    This defines a left and right table which have an ``id`` column that is not
    sorted and not unique. Each table has common and unique ``id`` key values
    along with an ``order`` column to keep track of the original order.
    """
    keep_supported = join_type in ["left", "right", "inner"]

    t1 = Table()
    t1["id"] = [2, 8, 2, 0, 0, 1]  # Join key
    t1["order"] = np.arange(len(t1))  # Original table order

    t2 = Table()
    t2["id"] = [2, 0, 1, 9, 0, 1]  # Join key
    t2["order"] = np.arange(len(t2))  # Original table order

    # No keys arg is allowed for cartesian join.
    keys_kwarg = {} if join_type == "cartesian" else {"keys": "id"}

    # Now do table joints with keep_order=False and keep_order=True.
    t12f = table.join(t1, t2, join_type=join_type, keep_order=False, **keys_kwarg)

    # For keep_order=True there should be a warning if keep_order is not
    # supported for the join type.
    ctx = (
        nullcontext()
        if keep_supported
        else pytest.warns(
            UserWarning,
            match=r"keep_order=True is only supported for left, right, and inner joins",
        )
    )
    with ctx:
        t12t = table.join(t1, t2, join_type=join_type, keep_order=True, **keys_kwarg)

    assert len(t12f) == len(t12t)
    assert t12f.colnames == t12t.colnames

    # Define expected sorting of join table for keep_order=False. Cartesian
    # joins are always sorted by the native order of the left table, otherwise
    # the table is sorted by the sort key ``id``.
    sort_key_false = "order_1" if join_type == "cartesian" else "id"

    # For keep_order=True the "order" column is sorted if keep is supported
    # otherwise the table is sorted as for keep_order=False.
    if keep_supported:
        sort_key_true = "order_2" if join_type == "right" else "order_1"
    else:
        sort_key_true = sort_key_false

    assert np.all(t12f[sort_key_false] == sorted(t12f[sort_key_false]))
    # BUG FIX: previously this was sorted([t12t[sort_key_true]]), which sorts a
    # one-element list containing the whole column and makes the assertion
    # vacuous. Sort the column values themselves, as in the line above.
    assert np.all(t12t[sort_key_true] == sorted(t12t[sort_key_true]))
Test that exception in join(..., keep_order=True) leaves table unchanged
def test_join_keep_sort_order_exception():
    """A failing join(..., keep_order=True) must leave both input tables unchanged."""
    left = Table([[1, 2]], names=["id"])
    right = Table([[2, 3]], names=["id"])
    expected = r"Left table does not have key column 'not-a-key'"
    with pytest.raises(TableMergeError, match=expected):
        table.join(left, right, keys="not-a-key", join_type="inner", keep_order=True)
    # Input tables must not have been modified by the failed join.
    assert left.colnames == ["id"]
    assert right.colnames == ["id"]
Test for issue #5617 when vstack'ing bytes columns in Py3. This is really an upstream numpy issue numpy/numpy/#8403.
def test_vstack_bytes(operation_table_type):
    """
    Regression test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue numpy/numpy/#8403.
    """
    single = operation_table_type([[b"a"]], names=["a"])
    assert single["a"].itemsize == 1

    stacked = table.vstack([single, single])
    assert len(stacked) == 2
    # Itemsize must not grow when stacking bytes columns.
    assert stacked["a"].itemsize == 1
Test for problem related to issue #5617 when vstack'ing *unicode* columns. In this case the character size gets multiplied by 4.
def test_vstack_unicode():
    """
    Regression test related to issue #5617 when vstack'ing *unicode* columns.
    In this case the character size gets multiplied by 4.
    """
    single = table.Table([["a"]], names=["a"])
    # 4-byte / char for U dtype
    assert single["a"].itemsize == 4

    stacked = table.vstack([single, single])
    assert len(stacked) == 2
    assert stacked["a"].itemsize == 4
Test for table join using non-ndarray key columns.
def test_join_mixins_time_quantity():
    """
    Test for table join using non-ndarray key columns.
    """
    tm1 = Time([2, 1, 2], format="cxcsec")
    q1 = [2, 1, 1] * u.m
    idx1 = [1, 2, 3]
    tm2 = Time([2, 3], format="cxcsec")
    q2 = [2, 3] * u.m
    idx2 = [10, 20]
    t1 = Table([tm1, q1, idx1], names=["tm", "q", "idx"])
    t2 = Table([tm2, q2, idx2], names=["tm", "q", "idx"])

    # Output:
    #
    # <Table length=4>
    #         tm            q    idx_1 idx_2
    #                       m
    #       object       float64 int64 int64
    # ------------------ ------- ----- -----
    # 0.9999999999969589     1.0     2    --
    #   2.00000000000351     1.0     3    --
    #   2.00000000000351     2.0     1    10
    #  3.000000000000469     3.0    --    20

    t12 = table.join(t1, t2, join_type="outer", keys=["tm", "q"])
    # Key cols are lexically sorted
    assert np.all(t12["tm"] == Time([1, 2, 2, 3], format="cxcsec"))
    assert np.all(t12["q"] == [1, 1, 2, 3] * u.m)
    # Unmatched rows get masked index values.
    assert np.all(t12["idx_1"] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))
    assert np.all(t12["idx_2"] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))
Test for table join using non-ndarray key columns that are not sortable.
def test_join_mixins_not_sortable():
    """Joining on a non-sortable mixin key column (SkyCoord) must raise TypeError."""
    coords = SkyCoord([1, 2], [3, 4], unit="deg,deg")
    left = Table([coords, [1, 2]], names=["sc", "idx1"])
    right = Table([coords, [10, 20]], names=["sc", "idx2"])

    with pytest.raises(TypeError, match="one or more key columns are not sortable"):
        table.join(left, right, keys="sc")
Regression test for #10823.
def test_argsort_time_column():
    """Regression test for #10823: Table.argsort on a Time column."""
    times = Time(["2016-01-01", "2018-01-01", "2017-01-01"])
    tbl = Table([times], names=["time"])
    order = tbl.argsort("time")
    expected = times.argsort()
    assert np.all(order == expected)
Test fix for #9473 and #6545 - and another regression test for #10823.
def test_sort_indexed_table():
    """Test fix for #9473 and #6545 - and another regression test for #10823."""
    t = Table([[1, 3, 2], [6, 4, 5]], names=("a", "b"))
    t.add_index("a")
    t.sort("a")
    assert np.all(t["a"] == [1, 2, 3])
    assert np.all(t["b"] == [6, 5, 4])
    t.sort("b")
    assert np.all(t["b"] == [4, 5, 6])
    assert np.all(t["a"] == [3, 2, 1])

    # Sorting by a Time column must also work on an indexed table.
    times = ["2016-01-01", "2018-01-01", "2017-01-01"]
    tm = Time(times)
    t2 = Table([tm, [3, 2, 1]], names=["time", "flux"])
    t2.sort("flux")
    assert np.all(t2["flux"] == [1, 2, 3])
    t2.sort("time")
    assert np.all(t2["flux"] == [3, 1, 2])
    assert np.all(t2["time"] == tm[[0, 2, 1]])

    # Using the table as a TimeSeries implicitly sets the index, so
    # this test is a bit different from the above.
    from astropy.timeseries import TimeSeries

    ts = TimeSeries(time=times)
    ts["flux"] = [3, 2, 1]
    ts.sort("flux")
    assert np.all(ts["flux"] == [1, 2, 3])
    ts.sort("time")
    assert np.all(ts["flux"] == [3, 1, 2])
    assert np.all(ts["time"] == tm[[0, 2, 1]])
Test that outer join, hstack and vstack fail for a mixin column which does not support masking.
def test_masking_required_exception():
    """
    Test that outer join, hstack and vstack fail for a mixin column which
    does not support masking.
    """
    col = table.NdarrayMixin([0, 1, 2, 3])
    t1 = table.QTable([[1, 2, 3, 4], col], names=["a", "b"])
    t2 = table.QTable([[1, 2], col[:2]], names=["a", "c"])

    # All three operations need masked values for unmatched entries.
    with pytest.raises(NotImplementedError) as err:
        table.vstack([t1, t2], join_type="outer")
    assert "vstack unavailable" in str(err.value)

    with pytest.raises(NotImplementedError) as err:
        table.hstack([t1, t2], join_type="outer")
    assert "hstack requires masking" in str(err.value)

    with pytest.raises(NotImplementedError) as err:
        table.join(t1, t2, join_type="outer")
    assert "join requires masking" in str(err.value)
Regression test for https://github.com/astropy/astropy/issues/4098
def test_pickle_multidimensional_column(protocol):
    """Regression test for https://github.com/astropy/astropy/issues/4098"""
    a = np.zeros((3, 2))
    c = Column(a, name="a")
    # BUG FIX: pass the parametrized pickle protocol through; previously it
    # was accepted as a fixture but never used, so only the default protocol
    # was exercised.
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)

    assert np.all(c == cp)
    assert c.shape == cp.shape
    assert cp.attrs_equal(c)
    assert repr(c) == repr(cp)
Ensure that any indices that have been added will survive pickling.
def test_pickle_indexed_table(protocol):
    """
    Ensure that any indices that have been added will survive pickling.
    """
    t = simple_table()
    t.add_index("a")
    t.add_index(["a", "b"])
    # BUG FIX: pass the parametrized pickle protocol through; previously it
    # was accepted as a fixture but never used.
    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)

    assert len(t.indices) == len(tp.indices)
    for index, indexp in zip(t.indices, tp.indices):
        assert np.all(index.data.data == indexp.data.data)
        assert index.data.data.colnames == indexp.data.data.colnames
Test for #148, that np.float32 cannot by itself be formatted as float, but has to be converted to a python float.
def test_pprint_npfloat32():
    """
    Test for #148: np.float32 cannot by itself be formatted as float and has
    to be converted to a python float first.
    """
    values = np.array([1.0, 2.0], dtype=np.float32)
    tbl = Table([values], names=["a"])
    tbl["a"].format = "5.2f"
    rendered = str(tbl["a"])
    assert rendered == "  a  \n-----\n 1.00\n 2.00"
Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3 is printed correctly (without the "b" prefix like b'string').
def test_pprint_py3_bytes():
    """
    Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3
    is printed correctly (without the "b" prefix like b'string').
    """
    val = bytes("val", encoding="utf-8")
    blah = "bläh".encode()
    dat = np.array([val, blah], dtype=[("col", "S10")])
    t = table.Table(dat)
    # Values decode to str for display; no b'' prefix appears.
    assert t["col"].pformat() == ["col ", "----", " val", "bläh"]
Regression test for #2213, making sure a nameless column can be printed using None as the name.
def test_pprint_nameless_col():
    """Regression test for #2213: a nameless column prints "None" as its name."""
    unnamed = table.Column([1.0, 2.0])
    rendered = str(unnamed)
    assert rendered.startswith("None")
Test HTML printing
def test_html():
    """Test HTML printing"""
    dat = np.array([1.0, 2.0], dtype=np.float32)
    t = Table([dat], names=["a"])

    # Default: no class attribute on the <table> element.
    lines = t.pformat(html=True)
    assert lines == [
        f'<table id="table{id(t)}">',
        "<thead><tr><th>a</th></tr></thead>",
        "<tr><td>1.0</td></tr>",
        "<tr><td>2.0</td></tr>",
        "</table>",
    ]

    # tableclass as a single string.
    lines = t.pformat(html=True, tableclass="table-striped")
    assert lines == [
        f'<table id="table{id(t)}" class="table-striped">',
        "<thead><tr><th>a</th></tr></thead>",
        "<tr><td>1.0</td></tr>",
        "<tr><td>2.0</td></tr>",
        "</table>",
    ]

    # tableclass as a list of class names.
    lines = t.pformat(html=True, tableclass=["table", "table-striped"])
    assert lines == [
        f'<table id="table{id(t)}" class="table table-striped">',
        "<thead><tr><th>a</th></tr></thead>",
        "<tr><td>1.0</td></tr>",
        "<tr><td>2.0</td></tr>",
        "</table>",
    ]
Test for #5802 (fix for #5800 where format_func key is not unique)
def test_auto_format_func():
    """Test for #5802 (fix for #5800 where format_func key is not unique)."""
    tbl = Table([[1, 2] * u.m])
    tbl["col0"].format = "%f"
    # Format once so the format function gets cached.
    tbl.pformat()

    # Formatting a QTable built from the same data must not reuse a stale
    # cached format function.
    qtbl = QTable(tbl)
    qtbl.pformat()
Test printing a bytestring column with a value that fails decoding to utf-8 and gets replaced by U+FFFD. See https://docs.python.org/3/library/codecs.html#codecs.replace_errors
def test_decode_replace():
    """
    Test printing a bytestring column with a value that fails
    decoding to utf-8 and gets replaced by U+FFFD. See
    https://docs.python.org/3/library/codecs.html#codecs.replace_errors
    """
    # b"\xf0" is not valid utf-8 so it decodes to the replacement char.
    t = Table([[b"Z\xf0"]])
    assert t.pformat() == [
        "col0",
        "----",
        " Z\ufffd",
    ]
Newlines and tabs are escaped in table repr
def test_embedded_newline_tab():
    """Newlines and tabs are escaped in table repr"""
    t = Table(
        rows=[
            ["a", "b \n c \t \n d"],
            ["x", "y\n"],
        ]
    )
    # Control characters are shown escaped, not rendered.
    exp = [
        r"col0      col1    ",
        r"---- --------------",
        r"   a b \n c \t \n d",
        r"   x            y\n",
    ]
    assert t.pformat_all() == exp
Test of fix for #13836 when a zero-dim column is present
def test_multidims_with_zero_dim():
    """Test of fix for #13836 when a zero-dim column is present"""
    t = Table()
    t["a"] = ["a", "b"]
    # Column with a zero-length axis: shape (2, 0, 1).
    t["b"] = np.ones(shape=(2, 0, 1), dtype=np.float64)
    exp = [
        " a       b     ",
        "str1 float64[0,1]",
        "---- ------------",
        "   a             ",
        "   b             ",
    ]
    assert t.pformat_all(show_dtype=True) == exp
Numpy < 1.8 has a bug in masked array that prevents access a row if there is a column with object type.
def test_masked_row_with_object_col():
    """
    Numpy < 1.8 has a bug in masked array that prevents access a row if there is
    a column with object type.
    """
    t = table.Table([[1]], dtype=["O"], masked=True)
    t["col0"].mask = False
    assert t[0]["col0"] == 1
    t["col0"].mask = True
    # Masked object entry reads back as the masked singleton.
    assert t[0]["col0"] is np.ma.masked
Test getting and setting a row using a tuple or list of column names
def test_row_tuple_column_slice():
    """
    Test getting and setting a row using a tuple or list of column names
    """
    t = table.QTable(
        [
            [1, 2, 3] * u.m,
            [10.0, 20.0, 30.0],
            [100.0, 200.0, 300.0],
            ["x", "y", "z"],
        ],
        names=["a", "b", "c", "d"],
    )
    # Get a row for index=1
    r1 = t[1]

    # Column slice with tuple of col names
    r1_abc = r1["a", "b", "c"]  # Row object for these cols
    # NOTE(review): the internal spacing of these expected repr lines may have
    # been mangled by formatting — verify column alignment against real output.
    r1_abc_repr = [
        "<Row index=1>",
        " a b c ",
        " m ",
        "float64 float64 float64",
        "------- ------- -------",
        " 2.0 20.0 200.0",
    ]
    assert repr(r1_abc).splitlines() == r1_abc_repr

    # Column slice with list of col names
    r1_abc = r1[["a", "b", "c"]]
    assert repr(r1_abc).splitlines() == r1_abc_repr

    # Make sure setting on a tuple or slice updates parent table and row
    r1["c"] = 1000
    r1["a", "b"] = 1000 * u.cm, 100.0
    assert r1["a"] == 10 * u.m
    assert r1["b"] == 100
    assert t["a"][1] == 10 * u.m
    assert t["b"][1] == 100.0
    assert t["c"][1] == 1000

    # Same but using a list of column names instead of tuple
    r1[["a", "b"]] = 2000 * u.cm, 200.0
    assert r1["a"] == 20 * u.m
    assert r1["b"] == 200
    assert t["a"][1] == 20 * u.m
    assert t["b"][1] == 200.0

    # Set column slice of column slice (writes propagate to the parent table)
    r1_abc["a", "c"] = -1 * u.m, -10
    assert t["a"][1] == -1 * u.m
    assert t["b"][1] == 200.0
    assert t["c"][1] == -10.0

    # Bad column name
    with pytest.raises(KeyError) as err:
        t[1]["a", "not_there"]
    assert "'not_there'" in str(err.value)

    # Too many values
    with pytest.raises(ValueError) as err:
        t[1]["a", "b"] = 1 * u.m, 2, 3
    assert "right hand side must be a sequence" in str(err.value)

    # Something without a length
    with pytest.raises(ValueError) as err:
        t[1]["a", "b"] = 1
    assert "right hand side must be a sequence" in str(err.value)
Test that setting a row that fails part way through does not change the table at all.
def test_row_tuple_column_slice_transaction():
    """
    Setting a row through a tuple of column names is transactional: if any
    single assignment fails, the table is left completely unchanged.
    """
    t = table.QTable(
        [
            [10.0, 20.0, 30.0],
            [1, 2, 3] * u.m,
        ],
        names=["a", "b"],
    )
    before = t.copy()

    # The assignment to 'a' would succeed, but 'b' fails on a bad unit.
    with pytest.raises(ValueError) as err:
        t[1]["a", "b"] = (-1, -1 * u.s)  # Bad unit
    assert "'s' (time) and 'm' (length) are not convertible" in str(err.value)

    # The partial write to 'a' must have been rolled back.
    assert t[1] == before[1]
Test that accessing a row with an unsigned integer works as with a signed integer. Similarly tests that printing such a row works. This is non-trivial: adding a signed and unsigned 64 bit integer in numpy results in a float, which is an invalid slice index. Regression test for gh-7464.
def test_uint_indexing():
    """
    Regression test for gh-7464: indexing with an unsigned integer must
    behave exactly like a signed integer, including the row repr. This is
    non-trivial because adding signed and unsigned 64-bit integers in numpy
    produces a float, which is an invalid slice index.
    """
    t = table.Table([[1.0, 2.0, 3.0]], names="a")

    # Column and table indexing agree for plain int, int64 and uint64.
    for idx in (1, np.int64(1), np.uint64(1)):
        assert t["a"][idx] == 2.0
    assert t[np.uint64(1)]["a"] == 2.0

    expected_repr = [
        "<Row index=1>",
        " a ",
        "float64",
        "-------",
        " 2.0",
    ]
    for idx in (1, np.int64(1), np.uint64(1)):
        assert repr(t[idx]).splitlines() == expected_repr
Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the temp directory.
def home_is_tmpdir(monkeypatch, tmp_path): """ Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the temp directory. """ # For Unix monkeypatch.setenv("HOME", str(tmp_path)) # For Windows monkeypatch.setenv("USERPROFILE", str(tmp_path))
Regression test for #828 - disallow comparison operators on whole Table
def test_disallow_inequality_comparisons():
    """
    Regression test for #828 - disallow comparison operators on whole Table
    """
    t = table.Table()

    # Each expression is intentionally unused — only the raise matters
    # (hence the B015 suppressions; added to the last line for consistency).
    with pytest.raises(TypeError):
        t > 2  # noqa: B015

    with pytest.raises(TypeError):
        t < 1.1  # noqa: B015

    with pytest.raises(TypeError):
        t >= 5.5  # noqa: B015

    with pytest.raises(TypeError):
        t <= -1.1  # noqa: B015
This highlights a Numpy bug. Once it works, it can be moved into the test_equality_masked test. Related Numpy bug report: https://github.com/numpy/numpy/issues/3840
def test_equality_masked_bug():
    """
    This highlights a Numpy bug. Once it works, it can be moved into the
    test_equality_masked test. Related Numpy bug report:

      https://github.com/numpy/numpy/issues/3840
    """
    # First table, converted to masked after reading.
    t = table.Table.read(
        [
            " a b c d",
            " 2 c 7.0 0",
            " 2 b 5.0 1",
            " 2 b 6.0 2",
            " 2 a 4.0 3",
            " 0 a 0.0 4",
            " 1 b 3.0 5",
            " 1 a 2.0 6",
            " 1 a 1.0 7",
        ],
        format="ascii",
    )

    t = table.Table(t, masked=True)

    # Second table differs from the first in rows 2, 4 and 6 (0-based).
    t2 = table.Table.read(
        [
            " a b c d",
            " 2 c 7.0 0",
            " 2 b 5.0 1",
            " 3 b 6.0 2",
            " 2 a 4.0 3",
            " 0 a 1.0 4",
            " 1 b 3.0 5",
            " 1 c 2.0 6",
            " 1 a 1.0 7",
        ],
        format="ascii",
    )

    # Row-wise equality of the masked structured array against the plain
    # table must flag exactly the unchanged rows.
    assert np.all(
        (t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
    )
Test converting columns to all unicode or all bytestring. This makes two columns, one which is unicode (str in Py3) and one which is bytes (UTF-8 encoded). There are two code paths in the conversions, a faster one where the data are actually ASCII and a slower one where UTF-8 conversion is required. This tests both via the ``uni`` param.
def test_unicode_bytestring_conversion(table_types, uni):
    """
    Convert columns to all-unicode or all-bytestring.

    Builds one unicode (str) column and one bytes (UTF-8 encoded) column.
    The conversion code has a fast path for pure-ASCII data and a slower one
    that requires real UTF-8 transcoding; the ``uni`` parameter exercises
    both paths.
    """
    byt = uni.encode("utf-8")
    t = table_types.Table([[byt], [uni], [1]], dtype=("S", "U", "i"))
    assert t["col0"].dtype.kind == "S"
    assert t["col1"].dtype.kind == "U"
    assert t["col2"].dtype.kind == "i"

    t["col0"].description = "col0"
    t["col1"].description = "col1"
    t["col0"].meta["val"] = "val0"
    t["col1"].meta["val"] = "val1"

    # Exercise both conversion directions.  Each must convert the two string
    # columns to the target kind, leave the int column alone, and carry the
    # per-column metadata through.
    for method, kind, expected in [
        ("convert_unicode_to_bytestring", "S", byt),
        ("convert_bytestring_to_unicode", "U", uni),
    ]:
        t1 = t.copy()
        getattr(t1, method)()
        assert t1["col0"].dtype.kind == kind
        assert t1["col1"].dtype.kind == kind
        assert t1["col2"].dtype.kind == "i"

        # Meta made it through.
        assert t1["col0"].description == "col0"
        assert t1["col1"].description == "col1"
        assert t1["col0"].meta["val"] == "val0"
        assert t1["col1"].meta["val"] == "val1"

        # np.array() de-fangs Table's automatic unicode sandwiching so we can
        # compare against the raw bytes/str value.
        assert np.array(t1["col0"])[0] == expected
        assert np.array(t1["col1"])[0] == expected
        assert np.array(t1["col2"])[0] == 1
Regression test for the reference cycle discussed in https://github.com/astropy/astropy/issues/2877
def test_table_deletion():
    """
    Regression test for the reference cycle discussed in
    https://github.com/astropy/astropy/issues/2877
    """
    finalized_ids = set()

    # A Table subclass that records the id of every instance finalized.
    # (Deliberately not named Test* so pytest doesn't try to collect it.)
    class RecordingTable(table.Table):
        def __del__(self):
            finalized_ids.add(id(self))

    t = RecordingTable({"a": [1, 2, 3]})
    table_id = id(t)
    assert t["a"].parent_table is t

    del t
    # Break any remaining cycles so __del__ actually runs.
    gc.collect()
    assert table_id in finalized_ids
Regression test for issue 3358 where nested iteration over a single table fails.
def test_nested_iteration():
    """
    Regression test for issue 3358 where nested iteration over a single table
    fails.
    """
    t = table.Table([[0, 1]], names=["a"])
    # Both loops iterate the same table object simultaneously.
    pairs = [(outer["a"], inner["a"]) for outer in t for inner in t]
    assert pairs == [(0, 0), (0, 1), (1, 0), (1, 1)]
Test no copy vs light (key) copy vs deep copy of table meta for different situations. #8404.
def test_table_meta_copy():
    """
    Check no-copy vs light (key) copy vs deep copy of table meta for the
    various construction paths. See #8404.
    """
    meta = {1: [1, 2]}
    src = table.Table([[1]])

    # Direct assignment stores the very same object — no copy at all.
    src.meta = meta
    assert src.meta is meta

    # Slicing makes a key (shallow) copy: new dict, shared values.
    dst = src[:]
    assert dst.meta is not src.meta
    assert dst.meta == src.meta
    assert dst.meta[1] is src.meta[1]

    # Table(t, copy=False) also makes a key copy.
    dst = table.Table(src, copy=False)
    assert dst.meta is not src.meta
    assert dst.meta == src.meta
    assert dst.meta[1] is src.meta[1]

    # Table(t, copy=True) deep-copies: contained values are new objects too.
    dst = table.Table(src, copy=True)
    assert dst.meta is not src.meta
    assert dst.meta == src.meta
    assert dst.meta[1] is not src.meta[1]
Test no copy vs light (key) copy vs deep copy of table meta when meta is supplied as a table init argument. #8404.
def test_table_meta_copy_with_meta_arg():
    """
    Test no copy vs light (key) copy vs deep copy of table meta when meta is
    supplied as a table init argument. #8404.
    """
    meta = {1: [1, 2]}
    meta2 = {2: [3, 4]}

    # copy=False keeps the caller's dict object itself.
    t = table.Table([[1]], meta=meta, copy=False)
    assert t.meta is meta

    # default copy=True stores an equal but distinct dict.
    t = table.Table([[1]], meta=meta)
    assert t.meta is not meta
    assert t.meta == meta

    # Test initializing from existing table with meta with copy=False:
    # the explicit meta argument wins over the source table's meta.
    t2 = table.Table(t, meta=meta2, copy=False)
    assert t2.meta is meta2
    assert t2.meta != t.meta  # Change behavior in #8404

    # Test initializing from existing table with meta with default copy=True
    t2 = table.Table(t, meta=meta2)
    assert t2.meta is not meta2
    assert t2.meta != t.meta  # Change behavior in #8404

    # Table init with copy=True and empty dict meta gets that empty dict
    t2 = table.Table(t, copy=True, meta={})
    assert t2.meta == {}

    # Table init with copy=True and kwarg meta=None gets the original table dict.
    # This is a somewhat ambiguous case because it could be interpreted as the
    # user wanting NO meta set on the output. This could be implemented by inspecting
    # call args.
    t2 = table.Table(t, copy=True, meta=None)
    assert t2.meta == t.meta

    # Test initializing empty table with meta with copy=False
    t = table.Table(meta=meta, copy=False)
    assert t.meta is meta
    assert t.meta[1] is meta[1]

    # Test initializing empty table with meta with default copy=True (deepcopy meta)
    t = table.Table(meta=meta)
    assert t.meta is not meta
    assert t.meta == meta
    assert t.meta[1] is not meta[1]
Replace existing Quantity column with a new column in a QTable
def test_replace_column_qtable():
    """``replace_column`` swaps in a brand-new Quantity column on a QTable."""
    length_col = [1, 2, 3] * u.m
    plain_col = [4, 5, 6]
    t = table.QTable([length_col, plain_col], names=["a", "b"])

    old_a = t["a"]
    old_b = t["b"]
    old_a.info.meta = {"aa": [0, 1, 2, 3, 4]}
    old_a.info.format = "%f"

    t.replace_column("a", length_col.to("cm"))

    assert np.all(t["a"] == old_a)  # Equal values (unit conversion aside)
    assert t["a"] is not old_a  # ... but a new column object
    assert t["b"] is old_b  # Original b column unchanged
    assert t.colnames == ["a", "b"]
    # The info attributes do not carry over to the replacement column.
    assert t["a"].info.meta is None
    assert t["a"].info.format is None
Test table update like ``t['a'] = value``. This leverages off the already well-tested ``replace_column`` and in-place update ``t['a'][:] = value``, so this testing is fairly light.
def test_replace_update_column_via_setitem():
    """
    Table update via ``t['a'] = value``. This rides on the already
    well-tested ``replace_column`` and in-place ``t['a'][:] = value`` paths,
    so the coverage here is deliberately light.
    """
    t = table.QTable([[1, 2] * u.m, [3, 4]], names=["a", "b"])
    assert isinstance(t["a"], u.Quantity)

    original_a = t["a"]

    # A unit-compatible scalar updates in place; the column object survives.
    t["a"] = 5 * u.m
    assert np.all(t["a"] == [5, 5] * u.m)
    assert t["a"] is original_a

    # An incompatible value replaces the whole column.
    t["a"] = [5, 6]
    assert np.all(t["a"] == [5, 6])
    assert isinstance(t["a"], table.Column)
    assert t["a"] is not original_a
Test warnings related to table replace change in #5556: Normal warning-free replace
def test_replace_update_column_via_setitem_warnings_normal():
    """
    Warnings for table replace (#5556): the ordinary in-place update and
    same-shape replace are warning-free even with all categories enabled.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
    categories = ["refcount", "attributes", "slice"]
    with table.conf.set_temp("replace_warnings", categories):
        t["a"] = 0  # in-place update
        t["a"] = [10, 20, 30]  # plain replace, nothing to warn about
Test warnings related to table replace change in #5556: Replace a slice, one warning.
def test_replace_update_column_via_setitem_warnings_slice():
    """
    Warnings for table replace (#5556): replacing a column on a table slice
    emits exactly one "looks like an array slice" warning.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
    with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
        sub = t[:2]

        # In-place update through the slice is silent and writes through to
        # the parent table.
        sub["a"] = 0
        assert np.all(t["a"] == [0, 0, 3])

        with pytest.warns(
            TableReplaceWarning,
            match="replaced column 'a' which looks like an array slice",
        ) as w:
            sub["a"] = [10, 20]  # replace slice
        assert len(w) == 1
Test warnings related to table replace change in #5556: Lost attributes.
def test_replace_update_column_via_setitem_warnings_attributes():
    """
    Warnings for table replace (#5556): replacing a column that carries
    extra attributes (here a unit) warns that those attributes are dropped.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
    t["a"].unit = "m"

    # Combine the warning capture and the config override in one statement.
    with pytest.warns(
        TableReplaceWarning,
        match=r"replaced column 'a' and column attributes \['unit'\]",
    ) as w, table.conf.set_temp(
        "replace_warnings", ["refcount", "attributes", "slice"]
    ):
        t["a"] = [10, 20, 30]
    assert len(w) == 1
Test warnings related to table replace change in #5556: Reference count changes.
def test_replace_update_column_via_setitem_warnings_refcount():
    """
    Warnings for table replace (#5556): replacing a column while an extra
    reference to it exists warns about the reference-count change.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
    extra_ref = t["a"]  # Deliberately unused: the extra reference is the point

    with pytest.warns(
        TableReplaceWarning, match="replaced column 'a' and the number of references"
    ) as w, table.conf.set_temp(
        "replace_warnings", ["refcount", "attributes", "slice"]
    ):
        t["a"] = [10, 20, 30]
    assert len(w) == 1
Test warnings related to table replace change in #5556: Test 'always' setting that raises warning for any replace.
def test_replace_update_column_via_setitem_warnings_always():
    """
    Test warnings related to table replace change in #5556: Test 'always'
    setting that raises warning for any replace.
    """
    from inspect import currentframe, getframeinfo

    t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])

    with table.conf.set_temp("replace_warnings", ["always"]):
        t["a"] = 0  # in-place slice update

        with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w:
            # The two lines below MUST stay adjacent: the lineno check relies
            # on the replace happening exactly one line after getframeinfo().
            frameinfo = getframeinfo(currentframe())
            t["a"] = [10, 20, 30]  # replace column
        assert len(w) == 1

        # Make sure the warning points back to the user code line
        assert w[0].lineno == frameinfo.lineno + 1
        assert "test_table" in w[0].filename
Test the replace_inplace config option related to #5556. In this case no replace is done.
def test_replace_update_column_via_setitem_replace_inplace():
    """
    The ``replace_inplace`` config option (#5556) forces every setitem to be
    an in-place update, so the column object is never replaced and no
    warnings fire even with every warning category enabled.
    """
    t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
    original_col = t["a"]
    t["a"].unit = "m"

    every_category = ["always", "refcount", "attributes", "slice"]
    with table.conf.set_temp("replace_inplace", True):
        with table.conf.set_temp("replace_warnings", every_category):
            # Plain in-place update keeps the same column object.
            t["a"] = 0
            assert original_col is t["a"]

            # This would normally replace the column — but not in this mode.
            t["a"] = [10, 20, 30]
            assert original_col is t["a"]
            assert np.all(t["a"] == [10, 20, 30])
Test whether a new Table inherits the primary_key attribute from its parent Table. Issue #4672
def test_primary_key_is_inherited():
    """Regression test for #4672: slices/copies inherit ``primary_key``."""
    t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=("a", "b"))
    t.add_index("a")
    key = t.primary_key
    # Tuples can't be compared for identity here, so just check content.
    assert key[0] == "a"

    # Slice, copy and re-init must all carry the primary key along.
    for derived in (t[:], t.copy(), table.Table(t)):
        assert key == derived.primary_key
        # Spot-check one indexed lookup; assume the rest match if this does.
        assert t.loc[1] == derived.loc[1]
Test that a char column of a QTable is assigned no unit and not a dimensionless unit, otherwise conversion of reader output to QTable fails.
def test_qtable_read_for_ipac_table_with_char_columns():
    """
    A char column read back into a QTable must get no unit at all (not a
    dimensionless unit), otherwise converting the reader output to QTable
    fails.
    """
    original = table.QTable([["A"]], names="B")
    buf = StringIO()
    original.write(buf, format="ascii.ipac")
    roundtripped = table.QTable.read(buf.getvalue(), format="ascii.ipac", guess=False)
    assert roundtripped["B"].unit is None
Regression test for issue #8422: passing the last row of a table into Table should return a new table containing that row.
def test_create_table_from_final_row():
    """
    Regression test for issue #8422: constructing a Table from the last row
    of another table must yield a new table containing that row.
    """
    src = table.Table([(1, 2)], names=["col"])
    last_row = src[-1]
    result = table.Table(last_row)["col"]
    assert result[0] == 2
Test setting units and descriptions via Table.read. The test here is less comprehensive because the implementation is exactly the same as for Table.__init__ (calling Table._set_column_attribute)
def test_set_units_descriptions_read():
    """
    Setting units and descriptions via ``Table.read``. The coverage here is
    light because the implementation is shared with ``Table.__init__`` (both
    call ``Table._set_column_attribute``).
    """
    for cls in (Table, QTable):
        t = cls.read(
            ["a b", "1 2"],
            format="ascii",
            units=[u.m, u.s],
            descriptions=["hi", "there"],
        )
        for name, unit, desc in (("a", u.m, "hi"), ("b", u.s, "there")):
            assert t[name].info.unit is unit
            assert t[name].info.description == desc
Explicitly check re-work of code related to broadcasting in #8933
def test_broadcasting_8933():
    """Explicit check of the broadcasting rework done in #8933."""
    t = table.Table([[1, 2]])  # Length-2 table

    t["a"] = [[3, 4]]  # ndim > 1 with shape[0] == 1 broadcasts over rows
    t["b"] = 5  # Plain scalar broadcasts
    t["c"] = [1]  # Treated as broadcastable scalar, not a length-1 array

    assert np.all(t["a"] == [[3, 4], [3, 4]])
    assert np.all(t["b"] == [5, 5])
    assert np.all(t["c"] == [1, 1])

    # The broadcast column must be writeable, not a read-only view.
    t["c"][1] = 10
    assert np.all(t["c"] == [1, 10])
Test the refactor and change in column upgrades introduced in 95902650f. This fixes a regression introduced by #8789 (Change behavior of Table regarding masked columns).
def test_custom_masked_column_in_nonmasked_table():
    """
    Test the refactor and change in column upgrades introduced in 95902650f.

    This fixes a regression introduced by #8789 (Change behavior of Table
    regarding masked columns).
    """

    # Custom masked-column hierarchy.
    class MyMaskedColumn(table.MaskedColumn):
        pass

    class MySubMaskedColumn(MyMaskedColumn):
        pass

    # Custom plain-column hierarchy.
    class MyColumn(table.Column):
        pass

    class MySubColumn(MyColumn):
        pass

    # Table subclass that declares which column classes to use when a plain
    # or masked column must be created/upgraded.
    class MyTable(table.Table):
        Column = MyColumn
        MaskedColumn = MyMaskedColumn

    a = table.Column([1])
    b = table.MaskedColumn([2], mask=[True])
    c = MyMaskedColumn([3], mask=[True])
    d = MySubColumn([4])
    e = MySubMaskedColumn([5], mask=[True])

    # Two different pathways for making table
    t1 = MyTable([a, b, c, d, e], names=["a", "b", "c", "d", "e"])
    t2 = MyTable()
    t2["a"] = a
    t2["b"] = b
    t2["c"] = c
    t2["d"] = d
    t2["e"] = e

    for t in (t1, t2):
        assert type(t["a"]) is MyColumn
        assert type(t["b"]) is MyMaskedColumn  # upgrade
        assert type(t["c"]) is MyMaskedColumn
        assert type(t["d"]) is MySubColumn
        assert type(t["e"]) is MySubMaskedColumn