Test sorting a table that has a mutable column such as SkyCoord. In this case the sort is done in-place
def test_sort_with_mutable_skycoord(): """Test sorting a table that has a mutable column such as SkyCoord. In this case the sort is done in-place """ t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit="deg,deg")], names=["a", "sc"]) meta = {"a": [1, 2]} ta = t["a"] tsc = t["sc"] t["sc"].info.meta = meta t.sort("a") assert np.all(t["a"] == [1, 2]) assert np.allclose(t["sc"].ra.to_value(u.deg), [3, 4]) assert np.allclose(t["sc"].dec.to_value(u.deg), [5, 6]) assert t["a"] is ta assert t["sc"] is tsc # Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1 # it is a reference. t["sc"].info.meta["a"][0] = 100 assert meta["a"][0] == 100
Test sorting a table that has a non-mutable column.
def test_sort_with_non_mutable(): """Test sorting a table that has a non-mutable column.""" t = Table([[2, 1], [3, 4]], names=["a", "b"]) ta = t["a"] tb = t["b"] t["b"].setflags(write=False) meta = {"a": [1, 2]} t["b"].info.meta = meta t.sort("a") assert np.all(t["a"] == [1, 2]) assert np.all(t["b"] == [4, 3]) assert ta is t["a"] assert tb is not t["b"] # Because the 'b' data is not writeable, the sort replaces the column with a # copy, so its info.meta is a deep copy and the original dict is unchanged. t["b"].info.meta["a"][0] = 100 assert meta["a"][0] == 1
Test the fix for #8977
def test_init_with_list_of_masked_arrays(): """Test the fix for #8977""" m0 = np.ma.array([0, 1, 2], mask=[True, False, True]) m1 = np.ma.array([3, 4, 5], mask=[False, True, False]) mc = [m0, m1] # Test _init_from_list t = table.Table([mc], names=["a"]) # Test add_column t["b"] = [m1, m0] assert t["a"].shape == (2, 3) assert np.all(t["a"][0] == m0) assert np.all(t["a"][1] == m1) assert np.all(t["a"][0].mask == m0.mask) assert np.all(t["a"][1].mask == m1.mask) assert t["b"].shape == (2, 3) assert np.all(t["b"][0] == m1) assert np.all(t["b"][1] == m0) assert np.all(t["b"][0].mask == m1.mask) assert np.all(t["b"][1].mask == m0.mask)
Test the update to how data_to_col works (#8972), using the regression example from #8971.
def test_data_to_col_convert_strategy(): """Test the update to how data_to_col works (#8972), using the regression example from #8971. """ t = table.Table([[0, 1]]) t["a"] = 1 t["b"] = np.int64(2) # Failed previously assert np.all(t["a"] == [1, 1]) assert np.all(t["b"] == [2, 2])
Test that adding a masked ndarray with a structured dtype works
def test_structured_masked_column(): """Test that adding a masked ndarray with a structured dtype works""" dtype = np.dtype([("z", "f8"), ("x", "f8"), ("y", "i4")]) t = Table() t["a"] = np.ma.array( [ (1, 2, 3), (4, 5, 6), ], mask=[ (False, False, True), (False, True, False), ], dtype=dtype, ) assert np.all(t["a"]["z"].mask == [False, False]) assert np.all(t["a"]["x"].mask == [False, True]) assert np.all(t["a"]["y"].mask == [True, False]) assert isinstance(t["a"], MaskedColumn)
Test for #9165 to allow adding a list of mixin objects. Also test for fix to #9357 where group_by() failed due to mixin object not having info.indices set to [].
def test_rows_with_mixins(): """Test for #9165 to allow adding a list of mixin objects. Also test for fix to #9357 where group_by() failed due to mixin object not having info.indices set to []. """ tm = Time([1, 2], format="cxcsec") q = [1, 2] * u.m mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin rows = [ (1, q[0], tm[0]), (2, q[1], tm[1]), ] t = table.QTable(rows=rows) t["a"] = [q[0], q[1]] t["b"] = [tm[0], tm[1]] t["m1"] = mixed1 t["m2"] = mixed2 assert np.all(t["col1"] == q) assert np.all(t["col2"] == tm) assert np.all(t["a"] == q) assert np.all(t["b"] == tm) assert all(t["m1"][ii] == mixed1[ii] for ii in range(2)) assert all(t["m2"][ii] == mixed2[ii] for ii in range(2)) assert type(t["m1"]) is table.Column assert t["m1"].dtype is np.dtype(object) assert type(t["m2"]) is table.Column assert t["m2"].dtype is np.dtype(object) # Ensure group_by() runs without failing for sortable columns. # The columns 'm1' and 'm2' are object dtype and not sortable. for name in ["col0", "col1", "col2", "a", "b"]: t.group_by(name) # For good measure include exactly the failure in #9357 in which the # list of Time() objects is in the Table initializer. mjds = [Time(58000, format="mjd")] t = Table([mjds, ["gbt"]], names=("mjd", "obs")) t.group_by("obs")
Ensures that upon writing a table, the fill_value attribute of a masked (integer) column is correctly propagated into the TNULL parameter in the FITS header
def test_table_write_preserves_nulls(tmp_path): """Ensures that upon writing a table, the fill_value attribute of a masked (integer) column is correctly propagated into the TNULL parameter in the FITS header""" # Could be anything except for 999999, which is the "default" fill_value # for masked int arrays NULL_VALUE = -1 # Create table with an integer MaskedColumn with custom fill_value c1 = MaskedColumn( name="a", data=np.asarray([1, 2, 3], dtype=np.int32), mask=[True, False, True], fill_value=NULL_VALUE, ) t = Table([c1]) table_filename = tmp_path / "nultable.fits" # Write the table out with Table.write() t.write(table_filename) # Open the output file, and check the TNULL parameter is NULL_VALUE with fits.open(table_filename) as hdul: header = hdul[1].header assert header["TNULL1"] == NULL_VALUE
Ensures that Table.as_array propagates a MaskedColumn's fill_value to the output array
def test_as_array_preserve_fill_value(): """Ensures that Table.as_array propagates a MaskedColumn's fill_value to the output array""" INT_FILL = 123 FLOAT_FILL = 123.0 STR_FILL = "xyz" CMPLX_FILL = complex(3.14, 2.71) # set up a table with some columns with different data types c_int = MaskedColumn(name="int", data=[1, 2, 3], fill_value=INT_FILL) c_float = MaskedColumn(name="float", data=[1.0, 2.0, 3.0], fill_value=FLOAT_FILL) c_str = MaskedColumn(name="str", data=["abc", "def", "ghi"], fill_value=STR_FILL) c_cmplx = MaskedColumn( name="cmplx", data=[complex(1, 0), complex(0, 1), complex(1, 1)], fill_value=CMPLX_FILL, ) t = Table([c_int, c_float, c_str, c_cmplx]) tn = t.as_array() assert tn["int"].fill_value == INT_FILL assert tn["float"].fill_value == FLOAT_FILL assert tn["str"].fill_value == STR_FILL assert tn["cmplx"].fill_value == CMPLX_FILL
Regression test for astropy issues #15911 and #5973
def test_table_hasattr_iloc(): """Regression test for astropy issues #15911 and #5973""" t = Table({"a": [1, 2, 3]}) assert hasattr(t, "iloc") assert hasattr(t, "loc") with pytest.raises(ValueError, match="for a table with indices"): t.iloc[0] with pytest.raises(ValueError, match="for a table with indices"): t.loc[0]
A context manager to temporarily disable stdout. Used later when installing a temporary copy of astropy to avoid a very verbose output.
def _suppress_stdout(): """ A context manager to temporarily disable stdout. Used later when installing a temporary copy of astropy to avoid a very verbose output. """ with open(os.devnull, "w") as devnull: old_stdout = sys.stdout sys.stdout = devnull try: yield finally: sys.stdout = old_stdout
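A minimal usage sketch, assuming the helper above is wrapped with ``contextlib.contextmanager`` (the decorator itself is not visible in the snippet):
with _suppress_stdout():
    print("this is swallowed by os.devnull")  # not shown on the terminal
print("stdout is restored here")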
This method is called after the tests have been run in coverage mode to clean up and then save the coverage data and report.
def _save_coverage(cov, result, rootdir, testing_path): """ This method is called after the tests have been run in coverage mode to clean up and then save the coverage data and report. """ from astropy.utils.console import color_print if result != 0: return # The coverage report includes the full path to the temporary # directory, so we replace all the paths with the true source # path. Note that this will not work properly for packages that still # rely on 2to3. try: # Coverage 4.0: _harvest_data has been renamed to get_data, the # lines dict is private cov.get_data() except AttributeError: # Coverage < 4.0 cov._harvest_data() lines = cov.data.lines else: lines = cov.data._lines for key in list(lines.keys()): new_path = os.path.relpath( os.path.realpath(key), os.path.realpath(testing_path) ) new_path = os.path.abspath(os.path.join(rootdir, new_path)) lines[new_path] = lines.pop(key) color_print("Saving coverage data in .coverage...", "green") cov.save() color_print("Saving HTML coverage report in htmlcov...", "green") cov.html_report(directory=os.path.join(rootdir, "htmlcov"))
Test that an object follows our Unicode policy. See "Unicode guidelines" in the coding guidelines. Parameters ---------- x : object The instance to test roundtrip : module, optional When provided, this namespace will be used to evaluate ``repr(x)`` and ensure that it roundtrips. It will also ensure that ``__bytes__(x)`` roundtrip. If not provided, no roundtrip testing will be performed.
def assert_follows_unicode_guidelines(x, roundtrip=None): """ Test that an object follows our Unicode policy. See "Unicode guidelines" in the coding guidelines. Parameters ---------- x : object The instance to test roundtrip : module, optional When provided, this namespace will be used to evaluate ``repr(x)`` and ensure that it roundtrips. It will also ensure that ``__bytes__(x)`` roundtrip. If not provided, no roundtrip testing will be performed. """ from astropy import conf with conf.set_temp("unicode_output", False): bytes_x = bytes(x) unicode_x = str(x) repr_x = repr(x) assert isinstance(bytes_x, bytes) bytes_x.decode("ascii") assert isinstance(unicode_x, str) unicode_x.encode("ascii") assert isinstance(repr_x, str) if isinstance(repr_x, bytes): repr_x.decode("ascii") else: repr_x.encode("ascii") if roundtrip is not None: assert x.__class__(bytes_x) == x assert x.__class__(unicode_x) == x assert eval(repr_x, roundtrip) == x with conf.set_temp("unicode_output", True): bytes_x = bytes(x) unicode_x = str(x) repr_x = repr(x) assert isinstance(bytes_x, bytes) bytes_x.decode("ascii") assert isinstance(unicode_x, str) assert isinstance(repr_x, str) if isinstance(repr_x, bytes): repr_x.decode("ascii") else: repr_x.encode("ascii") if roundtrip is not None: assert x.__class__(bytes_x) == x assert x.__class__(unicode_x) == x assert eval(repr_x, roundtrip) == x
Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). (Originally from astropy.table.tests.test_pickle).
def pickle_protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). (Originally from astropy.table.tests.test_pickle). """ return request.param
Check if the attributes of a and b are equal. Then, check if the attributes of the attributes are equal.
def generic_recursive_equality_test(a, b, class_history): """ Check if the attributes of a and b are equal. Then, check if the attributes of the attributes are equal. """ if sys.version_info < (3, 11): dict_a = a.__getstate__() if hasattr(a, "__getstate__") else a.__dict__ else: # NOTE: The call may need to be adapted if other objects implementing a __getstate__ # with required argument(s) are passed to this function. # For a class with `__slots__` the default state is not a `dict`; # with neither `__dict__` nor `__slots__` it is `None`. state = a.__getstate__(a) if isinstance(a, type) else a.__getstate__() dict_a = state if isinstance(state, dict) else getattr(a, "__dict__", dict()) dict_b = b.__dict__ for key in dict_a: assert key in dict_b, f"Did not pickle {key}" if dict_a[key].__class__.__eq__ is not object.__eq__: # Only compare if the class defines a proper equality test. # E.g., info does not define __eq__, and hence defers to # object.__eq__, which is equivalent to checking that two # instances are the same. This will generally not be true # after pickling. eq = dict_a[key] == dict_b[key] if "__iter__" in dir(eq): eq = False not in eq assert eq, f"Value of {key} changed by pickling" if hasattr(dict_a[key], "__dict__"): if dict_a[key].__class__ in class_history: # attempt to prevent infinite recursion pass else: new_class_history = [dict_a[key].__class__] new_class_history.extend(class_history) generic_recursive_equality_test( dict_a[key], dict_b[key], new_class_history )
Try to pickle an object. If successful, make sure the object's attributes survived pickling and unpickling.
def check_pickling_recovery(original, protocol): """ Try to pickle an object. If successful, make sure the object's attributes survived pickling and unpickling. """ f = pickle.dumps(original, protocol=protocol) unpickled = pickle.loads(f) class_history = [original.__class__] generic_recursive_equality_test(original, unpickled, class_history)
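A sketch of how these two helpers might be exercised together with the ``pickle_protocol`` fixture above; the ``Point`` class is made up for illustration:
class Point:
    """Toy class with a real __eq__ so the recursive attribute comparison is meaningful."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y


def test_point_pickle_roundtrip(pickle_protocol):
    # Pickles, unpickles, and recursively compares the instance attributes.
    check_pickling_recovery(Point(1.0, 2.0), pickle_protocol)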
Raise an assertion if two objects are not equal up to desired tolerance. This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.testing.assert_allclose`.
def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs): """ Raise an assertion if two objects are not equal up to desired tolerance. This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.testing.assert_allclose`. """ import numpy as np from astropy.units.quantity import _unquantify_allclose_arguments np.testing.assert_allclose( *_unquantify_allclose_arguments(actual, desired, rtol, atol), **kwargs )
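A short usage sketch, assuming the helper above is in scope; the values and units are arbitrary, and the comparison happens after unit conversion:
import astropy.units as u

# Different but compatible units are fine.
assert_quantity_allclose(1.0 * u.km, 1000.0 * u.m)
# When the inputs carry units, atol must carry compatible units too.
assert_quantity_allclose(1.0 * u.km, 1001.0 * u.m, atol=2 * u.m)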
Regression test for https://github.com/astropy/astropy/issues/2671 This test actually puts a goofy fake module into ``sys.modules`` to test this problem.
def test_import_error_in_warning_logging(): """ Regression test for https://github.com/astropy/astropy/issues/2671 This test actually puts a goofy fake module into ``sys.modules`` to test this problem. """ class FakeModule: def __getattr__(self, attr): raise ImportError("_showwarning should ignore any exceptions here") log.enable_warnings_logging() sys.modules["<test fake module>"] = FakeModule() try: warnings.showwarning( AstropyWarning("Regression test for #2671"), AstropyWarning, "<this is only a test>", 1, ) finally: del sys.modules["<test fake module>"]
Regression test for a crash that occurred on Python 3 when logging an exception that was instantiated with no arguments (no message, etc.) Regression test for https://github.com/astropy/astropy/pull/4056
def test_exception_logging_argless_exception(): """ Regression test for a crash that occurred on Python 3 when logging an exception that was instantiated with no arguments (no message, etc.) Regression test for https://github.com/astropy/astropy/pull/4056 """ try: log.enable_exception_logging() with log.log_to_list() as log_list: raise Exception() except Exception: sys.excepthook(*sys.exc_info()) else: raise AssertionError() # exception should have been raised assert len(log_list) == 1 assert log_list[0].levelname == "ERROR" assert log_list[0].message == "Exception [astropy.tests.test_logger]" assert log_list[0].origin == "astropy.tests.test_logger"
A decorator that defines a figure test. This automatically decorates tests with mpl_image_compare with common options used by all figure tests in astropy, and also adds the decorator to allow remote data to be accessed.
def figure_test(*args, **kwargs): """ A decorator that defines a figure test. This automatically decorates tests with mpl_image_compare with common options used by all figure tests in astropy, and also adds the decorator to allow remote data to be accessed. """ # NOTE: the savefig_kwargs option below is to avoid using PNG files with # the matplotlib version embedded since this changes for every developer # version. tolerance = kwargs.pop("tolerance", 0) style = kwargs.pop("style", {}) savefig_kwargs = kwargs.pop("savefig_kwargs", {}) savefig_kwargs["metadata"] = {"Software": None} def decorator(test_function): @pytest.mark.remote_data @pytest.mark.mpl_image_compare( tolerance=tolerance, style=style, savefig_kwargs=savefig_kwargs, **kwargs ) @pytest.mark.skipif( not HAS_PYTEST_MPL, reason="pytest-mpl is required for the figure tests" ) @wraps(test_function) def test_wrapper(*args, **kwargs): return test_function(*args, **kwargs) return test_wrapper # If the decorator was used without any arguments, the only positional # argument will be the test to decorate so we do the following: if len(args) == 1: return decorator(*args) return decorator
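A hedged sketch of how a figure test might look with this decorator; it assumes pytest-mpl is installed and that a baseline image for the (made-up) test name exists:
import matplotlib.pyplot as plt

@figure_test
def test_example_figure():
    # pytest-mpl compares the returned figure against a stored baseline
    # image, within the tolerance configured by the decorator.
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    return fig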
This just imports all modules in astropy, making sure they don't have any dependencies that sneak through
def test_imports(): """ This just imports all modules in astropy, making sure they don't have any dependencies that sneak through """ def onerror(name): # We should raise any legitimate error that occurred, but not # any warnings which happen to be caught because of our pytest # settings (e.g., DeprecationWarning). try: raise except Warning: pass for imper, nm, ispkg in pkgutil.walk_packages( ["astropy"], "astropy.", onerror=onerror ): imper.find_spec(nm)
Compress array by allowing at most 2 * edgeitems + 1 in each dimension. Parameters ---------- arr : array-like Array to compress. Returns ------- out : array-like Compressed array.
def _compress_array_dims(arr): """Compress array by allowing at most 2 * edgeitems + 1 in each dimension. Parameters ---------- arr : array-like Array to compress. Returns ------- out : array-like Compressed array. """ idxs = [] edgeitems = np.get_printoptions()["edgeitems"] # Build up a list of index arrays for each dimension, allowing no more than # 2 * edgeitems + 1 elements in each dimension. for dim in range(arr.ndim): if arr.shape[dim] > 2 * edgeitems: # The middle [edgeitems] value does not matter as it gets replaced # by ... in the output. idxs.append( np.concatenate( [np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)] ) ) else: idxs.append(np.arange(arr.shape[dim])) # Use the magic np.ix_ function to effectively treat each index array as a # slicing operator. idxs_ix = np.ix_(*idxs) out = arr[idxs_ix] return out
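A small illustration of the compression, assuming numpy's default print options (``edgeitems=3``):
import numpy as np

arr = np.arange(100).reshape(10, 10)
out = _compress_array_dims(arr)
# Each axis of length 10 (> 2 * 3) is reduced to 2 * 3 + 1 = 7 entries:
# the first three, one placeholder value, and the last three.
assert out.shape == (7, 7)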
Take ``val`` and convert/reshape to an array. If ``copy`` is `True` then copy input values. Returns ------- val : ndarray Array version of ``val``.
def _make_array(val, copy=COPY_IF_NEEDED): """ Take ``val`` and convert/reshape to an array. If ``copy`` is `True` then copy input values. Returns ------- val : ndarray Array version of ``val``. """ if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time): dtype = object else: dtype = None val = np.array(val, copy=copy, subok=True, dtype=dtype) # Allow only float64, string or object arrays as input # (object is for datetime, maybe add more specific test later?) # This also ensures the right byteorder for float64 (closes #2942). if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize: pass elif val.dtype.kind in "OSUMaV": pass else: val = np.asanyarray(val, dtype=np.float64) return val
Update ``mask`` with the mask from ``val`` (if any) and return the unmasked ``val`` data. If ``val`` is not masked then ``mask`` and ``val`` are returned unchanged. Parameters ---------- mask : bool, ndarray(bool) Mask to update val: ndarray, np.ma.MaskedArray, Masked Input val Returns ------- mask, val: bool, ndarray Updated mask, unmasked data
def get_mask_and_data(mask, val): """ Update ``mask`` with the mask from ``val`` (if any) and return the unmasked ``val`` data. If ``val`` is not masked then ``mask`` and ``val`` are returned unchanged. Parameters ---------- mask : bool, ndarray(bool) Mask to update val: ndarray, np.ma.MaskedArray, Masked Input val Returns ------- mask, val: bool, ndarray Updated mask, unmasked data """ if not isinstance(val, (np.ma.MaskedArray, Masked)): return mask, val if isinstance(val, np.ma.MaskedArray): data = val.data else: data = val.unmasked # For structured dtype, the mask is structured too. We consider an # array element masked if any field of the structure is masked. if val.dtype.names: val_mask = val.mask != np.zeros_like(val.mask, shape=()) else: val_mask = val.mask if np.any(val_mask): # Final mask is the logical-or of inputs mask = mask | val_mask return mask, data
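A brief usage sketch with a plain numpy masked array (values arbitrary), assuming the helper above is in scope:
import numpy as np

val = np.ma.MaskedArray([1.0, 2.0, 3.0], mask=[False, True, False])
mask, data = get_mask_and_data(np.zeros(3, dtype=bool), val)
# The data comes back as a plain ndarray and the returned mask now includes
# the element that was masked in the input.
assert not isinstance(data, np.ma.MaskedArray)
assert mask.tolist() == [False, True, False]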
If the current ERFA leap second table is out of date, try to update it. Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an up-to-date table. See that routine for the definition of "out of date". In order to make it safe to call this any time, all exceptions are turned into warnings. Parameters ---------- files : list of path-like, optional List of files/URLs to attempt to open. By default, uses the list defined by `astropy.utils.iers.LeapSeconds.auto_open`, which includes the table used by ERFA itself, so if that is up to date, nothing will happen. Returns ------- n_update : int Number of items updated.
def update_leap_seconds(files=None): """If the current ERFA leap second table is out of date, try to update it. Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an up-to-date table. See that routine for the definition of "out of date". In order to make it safe to call this any time, all exceptions are turned into warnings. Parameters ---------- files : list of path-like, optional List of files/URLs to attempt to open. By default, uses the list defined by `astropy.utils.iers.LeapSeconds.auto_open`, which includes the table used by ERFA itself, so if that is up to date, nothing will happen. Returns ------- n_update : int Number of items updated. """ try: from astropy.utils import iers table = iers.LeapSeconds.auto_open(files) return erfa.leap_seconds.update(table) except Exception as exc: warn( f"leap-second auto-update failed due to the following exception: {exc!r}", AstropyWarning, ) return 0
Iterate through each of the sub-formats and try substituting simple regular expressions for the strptime codes for year, month, day-of-month, hour, minute, second. If no % characters remain then turn the final string into a compiled regex. This assumes time formats do not have a % in them. This is done both to speed up parsing of strings and to allow mixed formats where strptime does not quite work well enough.
def _regexify_subfmts(subfmts): """ Iterate through each of the sub-formats and try substituting simple regular expressions for the strptime codes for year, month, day-of-month, hour, minute, second. If no % characters remain then turn the final string into a compiled regex. This assumes time formats do not have a % in them. This is done both to speed up parsing of strings and to allow mixed formats where strptime does not quite work well enough. """ new_subfmts = [] for subfmt_tuple in subfmts: subfmt_in = subfmt_tuple[1] if isinstance(subfmt_in, str): for strptime_code, regex in ( ("%Y", r"(?P<year>\d\d\d\d)"), ("%m", r"(?P<mon>\d{1,2})"), ("%d", r"(?P<mday>\d{1,2})"), ("%H", r"(?P<hour>\d{1,2})"), ("%M", r"(?P<min>\d{1,2})"), ("%S", r"(?P<sec>\d{1,2})"), ): subfmt_in = subfmt_in.replace(strptime_code, regex) if "%" not in subfmt_in: subfmt_tuple = ( subfmt_tuple[0], re.compile(subfmt_in + "$"), subfmt_tuple[2], ) new_subfmts.append(subfmt_tuple) return tuple(new_subfmts)
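For illustration, a single sub-format tuple before and after the transformation (the tuple layout follows the (name, input format, output format) convention used elsewhere in this section), assuming the helper above is in scope:
import re

subfmts = (("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),)
new_subfmts = _regexify_subfmts(subfmts)
# All strptime codes were substituted, so the input format is now a compiled regex.
assert isinstance(new_subfmts[0][1], re.Pattern)
assert new_subfmts[0][1].match("2000-02-03")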
Return the sum of ``val1`` and ``val2`` as two float64s. The returned floats are an integer part and the fractional remainder, with the latter guaranteed to be within -0.5 and 0.5 (inclusive on either side, as the integer is rounded to even). The arithmetic is all done with exact floating point operations so no precision is lost to rounding error. It is assumed the sum is less than about 1e16, otherwise the remainder will be greater than 1.0. Parameters ---------- val1, val2 : array of float Values to be summed. factor : float, optional If given, multiply the sum by it. divisor : float, optional If given, divide the sum by it. Returns ------- day, frac : float64 Integer and fractional part of val1 + val2.
def day_frac(val1, val2, factor=None, divisor=None): """Return the sum of ``val1`` and ``val2`` as two float64s. The returned floats are an integer part and the fractional remainder, with the latter guaranteed to be within -0.5 and 0.5 (inclusive on either side, as the integer is rounded to even). The arithmetic is all done with exact floating point operations so no precision is lost to rounding error. It is assumed the sum is less than about 1e16, otherwise the remainder will be greater than 1.0. Parameters ---------- val1, val2 : array of float Values to be summed. factor : float, optional If given, multiply the sum by it. divisor : float, optional If given, divide the sum by it. Returns ------- day, frac : float64 Integer and fractional part of val1 + val2. """ # Add val1 and val2 exactly, returning the result as two float64s. # The first is the approximate sum (with some floating point error) # and the second is the error of the float64 sum. sum12, err12 = two_sum(val1, val2) if factor is not None: sum12, carry = two_product(sum12, factor) carry += err12 * factor sum12, err12 = two_sum(sum12, carry) if divisor is not None: q1 = sum12 / divisor p1, p2 = two_product(q1, divisor) d1, d2 = two_sum(sum12, -p1) d2 += err12 d2 -= p2 q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost sum12, err12 = two_sum(q1, q2) # get integer fraction day = np.round(sum12) # Calculate remaining fraction. This can have gotten >0.5 or <-0.5, which means # we would lose one bit of precision. So, correct for that. Here, we need # particular care for the case that frac=0.5 and check>0 or frac=-0.5 and check<0, # since in that case if check is large enough, rounding was done the wrong way. frac, check = two_sum(sum12 - day, err12) excess = np.where( frac * np.sign(check) != 0.5, np.round(frac), np.round(frac + 2 * check) ) day += excess frac = sum12 - day frac += err12 return day, frac
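A small worked example of the splitting (numbers chosen arbitrarily), assuming the helper above is in scope:
day, frac = day_frac(2450000.0, 0.75)
# The exact sum 2450000.75 is split into the nearest integer day and a
# remainder within [-0.5, 0.5].
assert day == 2450001.0
assert frac == -0.25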
Like ``day_frac``, but for quantities with units of time. The quantities are separately converted to days. Here, we need to take care with the conversion since while the routines here can do accurate multiplication, the conversion factor itself may not be accurate. For instance, if the quantity is in seconds, the conversion factor is 1./86400., which is not exactly representable as a float. To work around this, for conversion factors less than unity, rather than multiply by that possibly inaccurate factor, the value is divided by the conversion factor of a day to that unit (i.e., by 86400. for seconds). For conversion factors larger than 1, such as 365.25 for years, we do just multiply. With this scheme, one has precise conversion factors for all regular time units that astropy defines. Note, however, that it does not necessarily work for all custom time units, and cannot work when conversion to time is via an equivalency. For those cases, one remains limited by the fact that Quantity calculations are done in double precision, not in quadruple precision as for time.
def quantity_day_frac(val1, val2=None): """Like ``day_frac``, but for quantities with units of time. The quantities are separately converted to days. Here, we need to take care with the conversion since while the routines here can do accurate multiplication, the conversion factor itself may not be accurate. For instance, if the quantity is in seconds, the conversion factor is 1./86400., which is not exactly representable as a float. To work around this, for conversion factors less than unity, rather than multiply by that possibly inaccurate factor, the value is divided by the conversion factor of a day to that unit (i.e., by 86400. for seconds). For conversion factors larger than 1, such as 365.25 for years, we do just multiply. With this scheme, one has precise conversion factors for all regular time units that astropy defines. Note, however, that it does not necessarily work for all custom time units, and cannot work when conversion to time is via an equivalency. For those cases, one remains limited by the fact that Quantity calculations are done in double precision, not in quadruple precision as for time. """ if val2 is not None: res11, res12 = quantity_day_frac(val1) res21, res22 = quantity_day_frac(val2) # This summation can at most lose 1 ULP in the second number. return res11 + res21, res12 + res22 try: factor = val1.unit.to(u.day) except Exception: # Not a simple scaling, so cannot do the full-precision one. # But at least try normal conversion, since equivalencies may be set. return val1.to_value(u.day), 0.0 if factor == 1.0: return day_frac(val1.value, 0.0) if factor > 1: return day_frac(val1.value, 0.0, factor=factor) else: divisor = u.day.to(val1.unit) return day_frac(val1.value, 0.0, divisor=divisor)
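A usage sketch with a quantity in seconds, which exercises the divide-by-86400 branch described above; assumes the helper is in scope:
import astropy.units as u

day, frac = quantity_day_frac(86400.0 * u.s)
# 86400 s is exactly one day, so the integer part is 1 and the remainder 0.
assert day == 1.0
assert frac == 0.0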
Add ``a`` and ``b`` exactly, returning the result as two float64s. The first is the approximate sum (with some floating point error) and the second is the error of the float64 sum. Using the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Returns ------- sum, err : float64 Approximate sum of a + b and the exact floating point error
def two_sum(a, b): """ Add ``a`` and ``b`` exactly, returning the result as two float64s. The first is the approximate sum (with some floating point error) and the second is the error of the float64 sum. Using the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Returns ------- sum, err : float64 Approximate sum of a + b and the exact floating point error """ x = a + b eb = x - a # bvirtual in Shewchuk ea = x - eb # avirtual in Shewchuk eb = b - eb # broundoff in Shewchuk ea = a - ea # aroundoff in Shewchuk return x, ea + eb
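A worked example at a magnitude where a single float64 addition loses information, assuming the helper above is in scope:
s, err = two_sum(1e16, 1.5)
# 1e16 + 1.5 rounds up to 1e16 + 2 in float64; the -0.5 that was rounded
# away is returned exactly in the error term, so s + err is the exact sum.
assert s == 1.0000000000000002e16
assert err == -0.5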
Multiply ``a`` and ``b`` exactly, returning the result as two float64s. The first is the approximate product (with some floating point error) and the second is the error of the float64 product. Uses the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Returns ------- prod, err : float64 Approximate product a * b and the exact floating point error
def two_product(a, b): """ Multiply ``a`` and ``b`` exactly, returning the result as two float64s. The first is the approximate product (with some floating point error) and the second is the error of the float64 product. Uses the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Returns ------- prod, err : float64 Approximate product a * b and the exact floating point error """ x = a * b ah, al = split(a) bh, bl = split(b) y1 = ah * bh y = x - y1 y2 = al * bh y -= y2 y3 = ah * bl y -= y3 y4 = al * bl y = y4 - y return x, y
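A quick check that the returned pair really represents the exact product; ``fractions.Fraction`` can verify this without any rounding (assumes the helper above is in scope):
from fractions import Fraction

prod, err = two_product(0.1, 0.1)
# prod is the rounded float64 product and err the rounding error; together
# they equal the exact product of the two float64 inputs.
assert Fraction(prod) + Fraction(err) == Fraction(0.1) * Fraction(0.1)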
Split float64 in two aligned parts. Uses the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
def split(a): """ Split float64 in two aligned parts. Uses the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf """ c = 134217729.0 * a # 2**27+1. abig = c - a ah = c - abig al = a - ah return ah, al
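A small check of the splitting property (input value arbitrary), assuming the helper above is in scope:
ah, al = split(0.1)
# The two aligned parts recombine exactly, and the low part is tiny
# compared with the high part.
assert ah + al == 0.1
assert abs(al) < abs(ah) * 1e-7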
Tests creating a Time object with the `now` class method.
def test_now(): """ Tests creating a Time object with the `now` class method. """ # `Time.datetime` is not timezone aware, meaning `.replace` is necessary for # `now` also not be timezone aware. now = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) t = Time.now() assert t.format == "datetime" assert t.scale == "utc" dt = t.datetime - now # a datetime.timedelta object # this gives a .1 second margin between the `now` call and the `Time` # initializer, which is really way more generous than necessary - typical # times are more like microseconds. But it seems safer in case some # platforms have slow clock calls or something. assert dt.total_seconds() < 0.1
Test that jd1/jd2 in a TimeFromEpoch format is always well-formed: jd1 is an integral value and abs(jd2) <= 0.5.
def test_time_from_epoch_jds(): """Test that jd1/jd2 in a TimeFromEpoch format is always well-formed: jd1 is an integral value and abs(jd2) <= 0.5. """ # From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will # catch jd2 == 0 and a case of abs(jd2) == 0.5. cxcsecs = np.linspace(0, 86400 * 1.5, 49) for cxcsec in cxcsecs: t = Time(cxcsec, format="cxcsec") assert np.round(t.jd1) == t.jd1 assert np.abs(t.jd2) <= 0.5 t = Time(cxcsecs, format="cxcsec") assert np.all(np.round(t.jd1) == t.jd1) assert np.all(np.abs(t.jd2) <= 0.5) assert np.any(np.abs(t.jd2) == 0.5)
Any Time object should evaluate to True unless it is empty [#3520].
def test_bool(): """Any Time object should evaluate to True unless it is empty [#3520].""" t = Time(np.arange(50000, 50010), format="mjd", scale="utc") assert bool(t) is True assert bool(t[0]) is True assert bool(t[:0]) is False
Check length of Time objects and that scalar ones do not have one.
def test_len_size(): """Check length of Time objects and that scalar ones do not have one.""" t = Time(np.arange(50000, 50010), format="mjd", scale="utc") assert len(t) == 10 and t.size == 10 t1 = Time(np.arange(50000, 50010).reshape(2, 5), format="mjd", scale="utc") assert len(t1) == 2 and t1.size == 10 # Can have length 1 or length 0 arrays. t2 = t[:1] assert len(t2) == 1 and t2.size == 1 t3 = t[:0] assert len(t3) == 0 and t3.size == 0 # But cannot get length from scalar. t4 = t[0] with pytest.raises(TypeError) as err: len(t4) # Ensure we're not just getting the old error of # "object of type 'float' has no len()". assert "Time" in str(err.value)
Guard against recurrence of #1122, where the TimeFormat class loses attributes (delta_ut1_utc here), preventing conversion to unix, cxc
def test_TimeFormat_scale(): """Guard against recurrence of #1122, where the TimeFormat class loses attributes (delta_ut1_utc here), preventing conversion to unix, cxc""" t = Time("1900-01-01", scale="ut1") t.delta_ut1_utc = 0.0 with pytest.warns(ErfaWarning): t.unix assert t.unix == t.utc.unix
Ensure that big-endian and little-endian both work (closes #2942)
def test_byteorder(): """Ensure that big-endian and little-endian both work (closes #2942)""" mjd = np.array([53000.00, 54000.00]) big_endian = mjd.astype(">f8") little_endian = mjd.astype("<f8") time_mjd = Time(mjd, format="mjd") time_big = Time(big_endian, format="mjd") time_little = Time(little_endian, format="mjd") assert np.all(time_big == time_mjd) assert np.all(time_little == time_mjd)
Test #3160 that time zone info in datetime objects is respected.
def test_datetime_tzinfo(): """ Test #3160 that time zone info in datetime objects is respected. """ class TZm6(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(hours=-6) d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6()) t = Time(d) assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
Test having a custom subfmts with a regular expression
def test_subfmts_regex(): """ Test having a custom subfmts with a regular expression """ class TimeLongYear(TimeString): name = "longyear" subfmts = ( ( "date", r"(?P<year>[+-]\d{5})-%m-%d", # hybrid "{year:+06d}-{mon:02d}-{day:02d}", ), ) t = Time("+02000-02-03", format="longyear") assert t.value == "+02000-02-03" assert t.jd == Time("2000-02-03").jd
Test basics of setting format attribute.
def test_set_format_basic(): """ Test basics of setting format attribute. """ for format, value in ( ("jd", 2451577.5), ("mjd", 51577.0), ("cxcsec", 65923264.184), # confirmed with Chandra.Time ("datetime", datetime.datetime(2000, 2, 3, 0, 0)), ("iso", "2000-02-03 00:00:00.000"), ): t = Time("+02000-02-03", format="fits") t0 = t.replicate() t.format = format assert t.value == value # Internal jd1 and jd2 are preserved assert t._time.jd1 is t0._time.jd1 assert t._time.jd2 is t0._time.jd2
Set format and round trip through a format that shares out_subfmt
def test_set_format_shares_subfmt(): """ Set format and round trip through a format that shares out_subfmt """ t = Time("+02000-02-03", format="fits", out_subfmt="date_hms", precision=5) tc = t.copy() t.format = "isot" assert t.precision == 5 assert t.out_subfmt == "date_hms" assert t.value == "2000-02-03T00:00:00.00000" t.format = "fits" assert t.value == tc.value assert t.precision == 5
Set format and round trip through a format that does not share out_subfmt
def test_set_format_does_not_share_subfmt(): """ Set format and round trip through a format that does not share out_subfmt """ t = Time("+02000-02-03", format="fits", out_subfmt="longdate") t.format = "isot" assert t.out_subfmt == "*" # longdate_hms not there, goes to default assert t.value == "2000-02-03T00:00:00.000" t.format = "fits" assert t.out_subfmt == "*" assert t.value == "2000-02-03T00:00:00.000"
Passing a bad format to replicate should raise ValueError, not KeyError. PR #3857.
def test_replicate_value_error(): """ Passing a bad format to replicate should raise ValueError, not KeyError. PR #3857. """ t1 = Time("2007:001", scale="tai") with pytest.raises(ValueError) as err: t1.replicate(format="definitely_not_a_valid_format") assert "format must be one of" in str(err.value)
Make sure that 'astropy_time' format is really gone after #3857. Kind of silly test but just to be sure.
def test_remove_astropy_time(): """ Make sure that 'astropy_time' format is really gone after #3857. Kind of silly test but just to be sure. """ t1 = Time("2007:001", scale="tai") assert "astropy_time" not in t1.FORMATS with pytest.raises(ValueError) as err: Time(t1, format="astropy_time") assert "format must be one of" in str(err.value)
Ensure that scalar `Time` instances are not reported as iterable by the `isiterable` utility. Regression test for https://github.com/astropy/astropy/issues/4048
def test_isiterable(): """ Ensure that scalar `Time` instances are not reported as iterable by the `isiterable` utility. Regression test for https://github.com/astropy/astropy/issues/4048 """ t1 = Time.now() assert not isiterable(t1) t2 = Time( ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"], format="iso", scale="utc", ) assert isiterable(t2)
Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention (see #6638)
def test_epoch_date_jd_is_day_fraction(): """ Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention (see #6638) """ t0 = Time("J2000", scale="tdb") assert t0.jd1 == 2451545.0 assert t0.jd2 == 0.0 t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb") assert t1.jd1 == 2451545.0 assert t1.jd2 == 0.0
Ensure that two equal dates defined in different ways behave equally (#6638)
def test_sum_is_equivalent(): """ Ensure that two equal dates defined in different ways behave equally (#6638) """ t0 = Time("J2000", scale="tdb") t1 = Time("2000-01-01 12:00:00", scale="tdb") assert t0 == t1 assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
Set from existing Time object.
def test_setitem_from_time_objects(): """Set from existing Time object.""" # Set from time object with different scale t = Time(["2000:001", "2000:002"], scale="utc") t2 = Time(["2000:010"], scale="tai") t[1] = t2[0] assert t.value[1] == t2.utc.value[0] # Time object with different scale and format t = Time(["2000:001", "2000:002"], scale="utc") t2.format = "jyear" t[1] = t2[0] assert t.yday[1] == t2.utc.yday[0]
Setting invalidates any transform deltas
def test_setitem_deltas(): """Setting invalidates any transform deltas""" t = Time([1, 2], format="cxcsec") t.delta_tdb_tt = [1, 2] t.delta_ut1_utc = [3, 4] t[1] = 3 assert not hasattr(t, "_delta_tdb_tt") assert not hasattr(t, "_delta_ut1_utc")
Check that we can initialize subclasses with a Time instance.
def test_subclass(): """Check that we can initialize subclasses with a Time instance.""" # Ref: Issue gh-#7449 and PR gh-#7453. class _Time(Time): pass t1 = Time("1999-01-01T01:01:01") t2 = _Time(t1) assert t2.__class__ == _Time assert t1 == t2
Test of Time.strftime
def test_strftime_scalar(): """Test of Time.strftime""" time_string = "2010-09-03 06:00:00" t = Time(time_string) for format in t.FORMATS: t.format = format assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
Test of Time.strptime
def test_strptime_scalar(): """Test of Time.strptime""" time_string = "2007-May-04 21:08:12" time_object = Time("2007-05-04 21:08:12") t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S") assert t == time_object
Test of Time.strptime
def test_strptime_array(): """Test of Time.strptime""" tstrings = [ ["1998-Jan-01 00:00:01", "1998-Jan-01 00:00:02"], ["1998-Jan-01 00:00:03", "1998-Jan-01 00:00:04"], ] tstrings = np.array(tstrings) time_object = Time( [ ["1998-01-01 00:00:01", "1998-01-01 00:00:02"], ["1998-01-01 00:00:03", "1998-01-01 00:00:04"], ] ) t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S") assert np.all(t == time_object) assert t.shape == tstrings.shape
Test of Time.strptime
def test_strptime_fracsec_array(): """Test of Time.strptime""" tstrings = [ ["1998-Jan-01 00:00:01.123", "1998-Jan-01 00:00:02.000001"], ["1998-Jan-01 00:00:03.000900", "1998-Jan-01 00:00:04.123456"], ] tstrings = np.array(tstrings) time_object = Time( [ ["1998-01-01 00:00:01.123", "1998-01-01 00:00:02.000001"], ["1998-01-01 00:00:03.000900", "1998-01-01 00:00:04.123456"], ] ) t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S.%f") assert np.all(t == time_object) assert t.shape == tstrings.shape
Test of Time.strftime
def test_strftime_scalar_fracsec(): """Test of Time.strftime""" time_string = "2010-09-03 06:00:00.123" t = Time(time_string) for format in t.FORMATS: t.format = format assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == time_string
Test YMDHMS functionality for a dict input. This includes ensuring that key and attribute access work. For extra fun use a time within a leap second.
def test_ymdhms_init_from_dict_scalar(kwargs): """ Test YMDHMS functionality for a dict input. This includes ensuring that key and attribute access work. For extra fun use a time within a leap second. """ time_dict = { "year": 2016, "month": 12, "day": 31, "hour": 23, "minute": 59, "second": 60.123456789, } tm = Time(time_dict, **kwargs) assert tm == Time("2016-12-31T23:59:60.123456789") for attr in time_dict: for value in (tm.value[attr], getattr(tm.value, attr)): if attr == "second": assert allclose_sec(time_dict[attr], value) else: assert time_dict[attr] == value # Now test initializing from a YMDHMS format time using the object tm_rt = Time(tm) assert tm_rt == tm assert tm_rt.format == "ymdhms" # Test initializing from a YMDHMS value (np.void, i.e. recarray row) # without specified format. tm_rt = Time(tm.ymdhms) assert tm_rt == tm assert tm_rt.format == "ymdhms"
Test special-case serialization of certain Time formats
def test_write_every_format_to_ecsv(fmt): """Test special-case serialization of certain Time formats""" t = Table() # Use a time that tests the default serialization of the time format tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s tm.format = fmt t["a"] = tm out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format="ascii.ecsv") assert t["a"].format == t2["a"].format # Some loss of precision in the serialization assert not np.all(t["a"] == t2["a"]) # But no loss in the format representation assert np.all(t["a"].value == t2["a"].value)
Test special-case serialization of certain Time formats
def test_write_every_format_to_fits(fmt, tmp_path): """Test special-case serialization of certain Time formats""" t = Table() # Use a time that tests the default serialization of the time format tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s tm.format = fmt t["a"] = tm out = tmp_path / "out.fits" t.write(out, format="fits") t2 = Table.read(out, format="fits", astropy_native=True) # Currently the format is lost in FITS so set it back t2["a"].format = fmt # No loss of precision in the serialization or representation assert np.all(t["a"] == t2["a"]) assert np.all(t["a"].value == t2["a"].value)
Test special-case serialization of certain Time formats
def test_write_every_format_to_hdf5(fmt, tmp_path): """Test special-case serialization of certain Time formats""" t = Table() # Use a time that tests the default serialization of the time format tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s tm.format = fmt t["a"] = tm out = tmp_path / "out.h5" t.write(str(out), format="hdf5", path="root", serialize_meta=True) t2 = Table.read(str(out), format="hdf5", path="root") assert t["a"].format == t2["a"].format # No loss of precision in the serialization or representation assert np.all(t["a"] == t2["a"]) assert np.all(t["a"].value == t2["a"].value)
Test that changing format with out_subfmt defined is not a problem. See #9812, #9810.
def test_format_subformat_compatibility(): """Test that changing format with out_subfmt defined is not a problem. See #9812, #9810.""" t = Time("2019-12-20", out_subfmt="date_??") assert t.mjd == 58837.0 assert t.yday == "2019:354:00:00" # Preserves out_subfmt t2 = t.replicate(format="mjd") assert t2.out_subfmt == "*" # Changes to default t2 = t.copy(format="mjd") assert t2.out_subfmt == "*" t2 = Time(t, format="mjd") assert t2.out_subfmt == "*" t2 = t.copy(format="yday") assert t2.out_subfmt == "date_??" assert t2.value == "2019:354:00:00" t.format = "yday" assert t.value == "2019:354:00:00" assert t.out_subfmt == "date_??" t = Time("2019-12-20", out_subfmt="date") assert t.mjd == 58837.0 assert t.yday == "2019:354"
Test that string like "2022-08-01.123" does not parse as ISO. See #6476 and the fix.
def test_format_fractional_string_parsing(use_fast_parser): """Test that string like "2022-08-01.123" does not parse as ISO. See #6476 and the fix.""" with pytest.raises( ValueError, match=r"Input values did not match the format class iso" ): with conf.set_temp("use_fast_parser", use_fast_parser): Time("2022-08-01.123", format="iso")
From a starting Time value, test that every valid combination of to_value(format, subfmt) works. See #9812, #9361.
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class): """From a starting Time value, test that every valid combination of to_value(format, subfmt) works. See #9812, #9361. """ t = Time("2000-01-01") subfmts = [subfmt[0] for subfmt in fmt_class.subfmts] + [None, "*"] for subfmt in subfmts: t.to_value(fmt_name, subfmt)
Test fix in #9969 for issue #9962 where the location attribute is lost when initializing Time from an existing Time instance or list of Time instances.
def test_location_init(location): """Test fix in #9969 for issue #9962 where the location attribute is lost when initializing Time from an existing Time instance or list of Time instances. """ tm = Time("J2010", location=location) # Init from a scalar Time tm2 = Time(tm) assert np.all(tm.location == tm2.location) assert type(tm.location) is type(tm2.location) # From a list of Times tm2 = Time([tm, tm]) if location is None: assert tm2.location is None else: for loc in tm2.location: assert loc == tm.location assert type(tm.location) is type(tm2.location) # Effectively the same as a list of Times, but just to be sure that # Table mixin initialization is working as expected. tm2 = Table([[tm, tm]])["col0"] if location is None: assert tm2.location is None else: for loc in tm2.location: assert loc == tm.location assert type(tm.location) is type(tm2.location)
Test fix in #9969 for issue #9962 where the location attribute is lost when initializing Time from an existing Time instance or list of Time instances. Make sure exception is correct.
def test_location_init_fail(): """Test fix in #9969 for issue #9962 where the location attribute is lost when initializing Time from an existing Time instance or list of Time instances. Make sure exception is correct. """ tm = Time("J2010", location=(45, 45)) tm2 = Time("J2010") with pytest.raises( ValueError, match="cannot concatenate times unless all locations" ): Time([tm, tm2])
Test `np.linspace` `__array_func__` implementation for scalar and arrays.
def test_linspace(): """Test `np.linspace` `__array_func__` implementation for scalar and arrays.""" t1 = Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]) t2 = Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]) atol = 2 * np.finfo(float).eps * abs(t1 - t2).max() ts = np.linspace(t1[0], t2[0], 3) assert ts[0].isclose(Time("2021-01-01 00:00:00"), atol=atol) assert ts[1].isclose(Time("2021-01-01 00:30:00"), atol=atol) assert ts[2].isclose(Time("2021-01-01 01:00:00"), atol=atol) ts = np.linspace(t1, t2[0], 2, endpoint=False) assert ts.shape == (2, 2) assert all( ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol) ) assert all( ts[1].isclose(Time(["2021-01-01 00:30:00", "2021-01-01 12:30:00"]), atol=atol) ) ts = np.linspace(t1, t2, 7) assert ts.shape == (7, 2) assert all( ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol) ) assert all( ts[1].isclose(Time(["2021-01-01 00:10:00", "2021-03-03 00:00:00"]), atol=atol) ) assert all( ts[5].isclose(Time(["2021-01-01 00:50:00", "2021-10-29 00:00:00"]), atol=atol) ) assert all( ts[6].isclose(Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]), atol=atol) )
Test `np.linspace` `retstep` option.
def test_linspace_steps(): """Test `np.linspace` `retstep` option.""" t1 = Time(["2021-01-01 00:00:00", "2021-01-01 12:00:00"]) t2 = Time("2021-01-02 00:00:00") atol = 2 * np.finfo(float).eps * abs(t1 - t2).max() ts, st = np.linspace(t1, t2, 7, retstep=True) assert ts.shape == (7, 2) assert st.shape == (2,) assert all(ts[1].isclose(ts[0] + st, atol=atol)) assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol)) assert all(st.isclose(TimeDelta([14400, 7200], format="sec"), atol=atol))
Test `np.linspace` `__array_func__` implementation for start/endpoints from different formats/systems.
def test_linspace_fmts(): """Test `np.linspace` `__array_func__` implementation for start/endpoints from different formats/systems. """ t1 = Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]) t2 = Time(2458850, format="jd") t3 = Time(1578009600, format="unix") atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max() ts = np.linspace(t1, t2, 3) assert ts.shape == (3, 2) assert all( ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol) ) assert all( ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-01 18:00:00"]), atol=atol) ) assert all( ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-01 12:00:00"]), atol=atol) ) ts = np.linspace(t1, Time([t2, t3]), 3) assert ts.shape == (3, 2) assert all( ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol) ) assert all( ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-02 12:00:00"]), atol=atol) ) assert all( ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-03 00:00:00"]), atol=atol) )
Test functionality of Time.isclose() method. Run every test with 2 args in original order and swapped, and using Quantity or TimeDelta for atol (when provided).
def test_isclose_time(swap, time_delta): """Test functionality of Time.isclose() method. Run every test with 2 args in original order and swapped, and using Quantity or TimeDelta for atol (when provided).""" def isclose_swap(t1, t2, **kwargs): if swap: t1, t2 = t2, t1 if "atol" in kwargs and time_delta: kwargs["atol"] = TimeDelta(kwargs["atol"]) return t1.isclose(t2, **kwargs) # Start with original demonstration from #8742. In this issue both t2 == t1 # and t3 == t1 give False, but this may change with a newer ERFA. t1 = Time("2018-07-24T10:41:56.807015240") t2 = t1 + 0.0 * u.s t3 = t1 + TimeDelta(0.0 * u.s) assert isclose_swap(t1, t2) assert isclose_swap(t1, t3) t2 = t1 + 1 * u.s assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day) # Test different unit assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day) t2 = t1 + [-1, 0, 2] * u.s assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False]) t2 = t1 + 3 * np.finfo(float).eps * u.day assert not isclose_swap(t1, t2)
Test functionality of TimeDelta.isclose() method. Run every test with 2 args in original order and swapped, and using Quantity or TimeDelta for atol (when provided), and using Quantity or TimeDelta for the other argument.
def test_isclose_timedelta(swap, time_delta, other_quantity): """Test functionality of TimeDelta.isclose() method. Run every test with 2 args in original order and swapped, and using Quantity or TimeDelta for atol (when provided), and using Quantity or TimeDelta for the other argument.""" def isclose_swap(t1, t2, **kwargs): if swap: t1, t2 = t2, t1 if "atol" in kwargs and time_delta: kwargs["atol"] = TimeDelta(kwargs["atol"]) return t1.isclose(t2, **kwargs) def isclose_other_quantity(t1, t2, **kwargs): if other_quantity: t2 = t2.to(u.day) if "atol" in kwargs and time_delta: kwargs["atol"] = TimeDelta(kwargs["atol"]) return t1.isclose(t2, **kwargs) t1 = TimeDelta(1.0 * u.s) t2 = t1 + 0.0 * u.s t3 = t1 + TimeDelta(0.0 * u.s) assert isclose_swap(t1, t2) assert isclose_swap(t1, t3) assert isclose_other_quantity(t1, t2) assert isclose_other_quantity(t1, t3) t2 = t1 + 1 * u.s assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day) assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day) assert isclose_other_quantity(t1, t2, atol=1.5 / 86400 * u.day) assert not isclose_other_quantity(t1, t2, atol=0.5 / 86400 * u.day) t1 = TimeDelta(0 * u.s) t2 = t1 + [-1, 0, 2] * u.s assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False]) assert np.all(isclose_other_quantity(t1, t2, atol=1.5 * u.s) == [True, True, False]) # Check with rtol # 1 * 0.6 + 0.5 = 1.1 --> 1 <= 1.1 --> True # 0 * 0.6 + 0.5 = 0.5 --> 0 <= 0.5 --> True # 2 * 0.6 + 0.5 = 1.7 --> 2 <= 1.7 --> False assert np.all(t1.isclose(t2, atol=0.5 * u.s, rtol=0.6) == [True, True, False]) t2 = t1 + 2 * np.finfo(float).eps * u.day assert not isclose_swap(t1, t2) assert not isclose_other_quantity(t1, t2)
Use offline IERS table only.
def setup_module(module): """Use offline IERS table only.""" iers.conf.auto_download = False
Restore original setting.
def teardown_module(module): """Restore original setting.""" iers.conf.auto_download = orig_auto_download
Test subclass where use_fast_parser class attribute is not in __dict__
def test_fast_subclass(): """Test subclass where use_fast_parser class attribute is not in __dict__""" class TimeYearDayTimeSubClass(TimeYearDayTime): name = "yday_subclass" # Inheritance works assert hasattr(TimeYearDayTimeSubClass, "fast_parser_pars") assert "fast_parser_pars" not in TimeYearDayTimeSubClass.__dict__ try: # For YearDayTime, forcing the fast parser with a bad date will give # "fast C time string parser failed: time string ends in middle of component". # But since YearDayTimeSubClass does not have fast_parser_pars it will # use the Python parser. with pytest.raises( ValueError, match="Time 2000:0601 does not match yday_subclass format" ): with conf.set_temp("use_fast_parser", "force"): Time("2000:0601", format="yday_subclass") finally: del TimeYearDayTimeSubClass._registry["yday_subclass"]
Fix for #9612
def test_all_masked_input(masked_cls, val): """Fix for #9612""" # Test with jd=0 and jd=np.nan. Both triggered an exception prior to #9624 # due to astropy.utils.exceptions.ErfaError. val = masked_cls(val, mask=True) t = Time(val, format="jd") if val.ndim: assert str(t.iso).endswith("———]") else: assert str(t.iso).endswith("———")
Test for cls.fill_value() being longer than other input strings.
def test_some_masked_input_str_no_subfmt(): """Test for cls.fill_value() being longer than other input strings.""" dates = Masked(["", "2023:001"], mask=True) t = Time(dates, format="yday") assert np.all(t.mask) assert np.all(t.unmasked == "2000:001:12:00:00.000")
Checks equality of shape and content.
def assert_time_all_equal(t1, t2): """Checks equality of shape and content.""" assert t1.shape == t2.shape assert np.all(t1 == t2)
This is an expensive operation, so we share it between tests using a module-scoped fixture instead of using the context manager form. This is particularly important for Hypothesis, which invokes the decorated test function many times (100 by default; see conftest.py for details).
def iers_b(): """This is an expensive operation, so we share it between tests using a module-scoped fixture instead of using the context manager form. This is particularly important for Hypothesis, which invokes the decorated test function many times (100 by default; see conftest.py for details). """ with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)): yield "<using IERS-B orientation table>"
Assert numbers are almost equal. This version also lets hypothesis know how far apart the inputs are, so that it can work towards a failure and present the worst failure ever seen as well as the simplest, which often just barely exceeds the threshold.
def assert_almost_equal(a, b, *, rtol=None, atol=None, label=""): """Assert numbers are almost equal. This version also lets hypothesis know how far apart the inputs are, so that it can work towards a failure and present the worst failure ever seen as well as the simplest, which often just barely exceeds the threshold. """ __tracebackhide__ = True if rtol is None or rtol == 0: thresh = atol elif atol is None: thresh = rtol * (abs(a) + abs(b)) / 2 else: thresh = atol + rtol * (abs(a) + abs(b)) / 2 amb = a - b if isinstance(amb, TimeDelta): ambv = amb.to_value(u.s) target(ambv, label=label + " (a-b).to_value(u.s), from TimeDelta") target(-ambv, label=label + " (b-a).to_value(u.s), from TimeDelta") if isinstance(thresh, u.Quantity): amb = amb.to(thresh.unit) else: try: target_value = float(amb) except TypeError: pass else: target(target_value, label=label + " float(a-b)") target(-target_value, label=label + " float(b-a)") assert abs(amb) < thresh
Pick a reasonable JD. These should be not too far in the past or future (so that date conversion routines don't have to deal with anything too exotic), but they should include leap second days as a special case, and they should include several particularly simple cases (today, the beginning of the MJD scale, a reasonable date) so that hypothesis' example simplification produces obviously simple examples when they trigger problems.
def reasonable_jd():
    """Pick a reasonable JD.

    These should be not too far in the past or future (so that date
    conversion routines don't have to deal with anything too exotic), but
    they should include leap second days as a special case, and they should
    include several particularly simple cases (today, the beginning of the
    MJD scale, a reasonable date) so that hypothesis' example simplification
    produces obviously simple examples when they trigger problems.
    """
    moments = [(2455000.0, 0.0), (mjd0.jd1, mjd0.jd2), (today.jd1, today.jd2)]
    return one_of(sampled_from(moments), reasonable_ordinary_jd(), leap_second_tricky())
JD pair that might be unordered or far away
def unreasonable_ordinary_jd():
    """JD pair that might be unordered or far away"""
    return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
JD pair that is ordered but not necessarily near now
def ordered_jd():
    """JD pair that is ordered but not necessarily near now"""
    return tuples(floats(-1e7, 1e7), floats(-0.5, 0.5))
Make jd2 approach +/-0.5, and check that it doesn't go over.
def test_abs_jd2_always_less_than_half():
    """Make jd2 approach +/-0.5, and check that it doesn't go over."""
    t1 = Time(2400000.5, [-tiny, +tiny], format="jd")
    assert np.all(t1.jd1 % 1 == 0)
    assert np.all(abs(t1.jd2) < 0.5)
    t2 = Time(
        2400000.0, [[0.5 - tiny, 0.5 + tiny], [-0.5 - tiny, -0.5 + tiny]], format="jd"
    )
    assert np.all(t2.jd1 % 1 == 0)
    assert np.all(abs(t2.jd2) < 0.5)
Check that an addition at the limit of precision (2^-52) is seen
def test_addition():
    """Check that an addition at the limit of precision (2^-52) is seen"""
    t = Time(2455555.0, 0.5, format="jd", scale="utc")
    t_dt = t + dt_tiny
    assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2

    # Check that the addition is exactly reversed by the corresponding
    # subtraction
    t2 = t_dt - dt_tiny
    assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
Test precision with multiply and divide
def test_mult_div():
    """Test precision with multiply and divide"""
    dt_small = 6 * dt_tiny
    # pick a number that will leave remainder if divided by 6.
    dt_big = TimeDelta(20000.0, format="jd")
    dt_big_small_by_6 = (dt_big + dt_small) / 6.0
    dt_frac = dt_big_small_by_6 - TimeDelta(3333.0, format="jd")
    assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
Check that 3 ways of specifying a time + small offset are equivalent
def test_init_variations():
    """Check that 3 ways of specifying a time + small offset are equivalent"""
    dt_tiny_sec = dt_tiny.jd2 * 86400.0
    t1 = Time(1e11, format="cxcsec") + dt_tiny
    t2 = Time(1e11, dt_tiny_sec, format="cxcsec")
    t3 = Time(dt_tiny_sec, 1e11, format="cxcsec")
    assert t1.jd1 == t2.jd1
    assert t1.jd2 == t2.jd2
    assert t1.jd1 == t3.jd1
    assert t1.jd2 == t3.jd2
Check that Time object really holds more precision than float64 by looking at the (naively) summed 64-bit result and asserting equality at the bit level.
def test_precision_exceeds_64bit():
    """
    Check that Time object really holds more precision than float64 by
    looking at the (naively) summed 64-bit result and asserting equality at
    the bit level.
    """
    t1 = Time(1.23456789e11, format="cxcsec")
    t2 = t1 + dt_tiny
    assert t1.jd == t2.jd
Check that precision holds through scale change (cxcsec is TT)
def test_through_scale_change():
    """Check that precision holds through scale change (cxcsec is TT)"""
    t0 = Time(1.0, format="cxcsec")
    t1 = Time(1.23456789e11, format="cxcsec")
    dt_tt = t1 - t0
    dt_tai = t1.tai - t0.tai
    assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
    assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
Check when initializing from ISO date
def test_iso_init():
    """Check when initializing from ISO date"""
    t1 = Time("2000:001:00:00:00.00000001", scale="tai")
    t2 = Time("3000:001:13:00:00.00000002", scale="tai")
    dt = t2 - t1
    assert allclose_jd2(dt.jd2, 13.0 / 24.0 + 1e-8 / 86400.0 - 1.0)
Check that jd1 is a multiple of 1.
def test_jd1_is_mult_of_one():
    """
    Check that jd1 is a multiple of 1.
    """
    t1 = Time("2000:001:00:00:00.00000001", scale="tai")
    assert np.round(t1.jd1) == t1.jd1
    t1 = Time(1.23456789, 12345678.90123456, format="jd", scale="tai")
    assert np.round(t1.jd1) == t1.jd1
Check precision when jd1 is negative. This used to fail because ERFA routines use a test like jd1 > jd2 to decide which component to update. It was updated to abs(jd1) > abs(jd2) in erfa 1.6 (sofa 20190722).
def test_precision_neg():
    """
    Check precision when jd1 is negative.  This used to fail because ERFA
    routines use a test like jd1 > jd2 to decide which component to update.
    It was updated to abs(jd1) > abs(jd2) in erfa 1.6 (sofa 20190722).
    """
    t1 = Time(-100000.123456, format="jd", scale="tt")
    assert np.round(t1.jd1) == t1.jd1
    t1_tai = t1.tai
    assert np.round(t1_tai.jd1) == t1_tai.jd1
Check that input via epoch also has full precision, i.e., against regression on https://github.com/astropy/astropy/pull/366
def test_precision_epoch():
    """
    Check that input via epoch also has full precision, i.e., against
    regression on https://github.com/astropy/astropy/pull/366
    """
    t_utc = Time(range(1980, 2001), format="jyear", scale="utc")
    t_tai = Time(range(1980, 2001), format="jyear", scale="tai")
    dt = t_utc - t_tai
    assert allclose_sec(dt.sec, np.round(dt.sec))
Regression tests against #2083, where a leap second was rounded incorrectly by the underlying ERFA routine.
def test_leap_seconds_rounded_correctly():
    """Regression tests against #2083, where a leap second was rounded
    incorrectly by the underlying ERFA routine."""
    with iers.conf.set_temp("auto_download", False):
        t = Time(
            ["2012-06-30 23:59:59.413", "2012-07-01 00:00:00.413"],
            scale="ut1",
            precision=3,
        ).utc
        assert np.all(
            t.iso == np.array(["2012-06-30 23:59:60.000", "2012-07-01 00:00:00.000"])
        )
UTC is very unhappy with unreasonable times. Unlike for the other timescales, in which addition is done directly, here the time is transformed to TAI before addition, and then back to UTC. Hence, some rounding errors can occur and only a change of 2*dt_tiny is guaranteed to give a different time.
def test_resolution_never_decreases_utc(jds):
    """UTC is very unhappy with unreasonable times.

    Unlike for the other timescales, in which addition is done directly,
    here the time is transformed to TAI before addition, and then back to
    UTC.  Hence, some rounding errors can occur and only a change of
    2*dt_tiny is guaranteed to give a different time.
    """
    jd1, jd2 = jds
    t = Time(jd1, jd2, format="jd", scale="utc")
    with quiet_erfa():
        assert t != t + 2 * dt_tiny
Check that time ordering remains if we convert to another scale. Here, since scale differences can involve multiplication, we allow for losing one ULP, i.e., we test that two times that differ by two ULP will keep the same order if changed to another scale.
def test_conversion_never_loses_precision(iers_b, scale1, scale2, jds):
    """Check that time ordering remains if we convert to another scale.

    Here, since scale differences can involve multiplication, we allow for
    losing one ULP, i.e., we test that two times that differ by two ULP will
    keep the same order if changed to another scale.
    """
    jd1, jd2 = jds
    t = Time(jd1, jd2, scale=scale1, format="jd")
    # Near-zero UTC JDs degrade accuracy; not clear why,
    # but also not so relevant, so ignoring.
    if (scale1 == "utc" or scale2 == "utc") and abs(jd1 + jd2) < 1:
        tiny = 100 * u.us
    else:
        tiny = 2 * dt_tiny

    try:
        with quiet_erfa():
            t2 = t + tiny
            t_scale2 = getattr(t, scale2)
            t2_scale2 = getattr(t2, scale2)
            assert t_scale2 < t2_scale2
    except iers.IERSRangeError:  # UT1 conversion needs IERS data
        assume(scale1 != "ut1" or 2440000 < jd1 + jd2 < 2458000)
        assume(scale2 != "ut1" or 2440000 < jd1 + jd2 < 2458000)
        raise
    except ErfaError:
        # If the generated date is too early to compute a UTC julian date,
        # and we're not converting between scales which are known to be safe,
        # tell Hypothesis that this example is invalid and to try another.
        # See https://docs.astropy.org/en/latest/time/index.html#time-scale
        barycentric = {scale1, scale2}.issubset({"tcb", "tdb"})
        geocentric = {scale1, scale2}.issubset({"tai", "tt", "tcg"})
        assume(jd1 + jd2 >= -31738.5 or geocentric or barycentric)
        raise
    except AssertionError:
        # Before 1972, TAI-UTC changed smoothly but not always very
        # consistently; this can cause trouble on day boundaries for UTC to
        # UT1; it is not clear whether this will ever be resolved (and is
        # unlikely ever to matter).
        # Furthermore, exactly at leap-second boundaries, it is possible to
        # get the wrong leap-second correction due to rounding errors.
        # The latter is xfail'd for now, but should be fixed; see gh-13517.
        if "ut1" in (scale1, scale2):
            if abs(t_scale2 - t2_scale2 - 1 * u.s) < 1 * u.ms:
                pytest.xfail()
            assume(t.jd > 2441317.5 or t.jd2 < 0.4999999)
        raise
Check that no rounding errors are incurred by unit conversion. This occurred before as quantities in seconds were converted to days before trying to split them into two-part doubles. See gh-7622.
def test_quantity_conversion_rounding(q1, q2):
    """Check that no rounding errors are incurred by unit conversion.

    This occurred before as quantities in seconds were converted to days
    before trying to split them into two-part doubles.  See gh-7622.
    """
    t = Time("2001-01-01T00:00:00.", scale="tai")
    expected = Time("2016-11-05T00:53:20.", scale="tai")
    if q2 is None:
        t0 = t + q1
    else:
        t0 = t + q1 + q2
    assert abs(t0 - expected) < 20 * u.ps

    dt1 = TimeDelta(q1, q2)
    t1 = t + dt1
    assert abs(t1 - expected) < 20 * u.ps

    dt2 = TimeDelta(q1, q2, format="sec")
    t2 = t + dt2
    assert abs(t2 - expected) < 20 * u.ps
The doc string is formatted; this ensures this remains working.
def test_doc_string_contains_models():
    """The doc string is formatted; this ensures this remains working."""
    for kind in ("mean", "apparent"):
        for model in SIDEREAL_TIME_MODELS[kind]:
            assert model in Time.sidereal_time.__doc__
This is a decorator that ensures that the table contains the specific columns indicated by the _required_columns attribute. The aim is to decorate all methods that might affect the columns in the table and check for consistency after the methods have been run.
def autocheck_required_columns(cls):
    """
    This is a decorator that ensures that the table contains the specific
    columns indicated by the _required_columns attribute. The aim is to
    decorate all methods that might affect the columns in the table and check
    for consistency after the methods have been run.
    """

    def decorator_method(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            result = method(self, *args, **kwargs)
            self._check_required_columns()
            return result

        return wrapper

    for name in COLUMN_RELATED_METHODS:
        if not hasattr(cls, name) or not isinstance(getattr(cls, name), FunctionType):
            raise ValueError(f"{name} is not a valid method")
        setattr(cls, name, decorator_method(getattr(cls, name)))

    return cls
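A hedged sketch of the intended usage pattern (not part of the library): the class name, the required column, and the simplified _check_required_columns body below are hypothetical; COLUMN_RELATED_METHODS is the module-level list of table methods that get wrapped.

from astropy.table import QTable


@autocheck_required_columns
class DemoSeries(QTable):
    # Hypothetical subclass: "time" must survive every column-modifying method.
    _required_columns = ["time"]

    def _check_required_columns(self):
        missing = [name for name in self._required_columns if name not in self.colnames]
        if missing:
            raise ValueError(f"missing required column(s): {missing}")


d = DemoSeries()
d["time"] = [1.0, 2.0, 3.0]
d["flux"] = [10.0, 20.0, 30.0]
d.remove_column("flux")   # fine: "time" survives, so the wrapped check passes
# d.remove_column("time") # would raise ValueError from the consistency check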
Manual reduceat functionality for cases where Numpy functions don't have a reduceat. It will check if the input function has a reduceat and call that if it does.
def reduceat(array, indices, function):
    """
    Manual reduceat functionality for cases where Numpy functions don't have
    a reduceat.  It will check if the input function has a reduceat and call
    that if it does.
    """
    if len(indices) == 0:
        return np.array([])
    elif hasattr(function, "reduceat"):
        return np.array(function.reduceat(array, indices))
    else:
        result = []
        for i in range(len(indices) - 1):
            if indices[i + 1] <= indices[i] + 1:
                result.append(function(array[indices[i]]))
            else:
                result.append(function(array[indices[i] : indices[i + 1]]))
        result.append(function(array[indices[-1] :]))
        return np.array(result)
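A short illustration (not from the original file) of the two code paths: np.add has a native reduceat, while np.nanmean does not and falls through to the Python loop; the sample values are invented.

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
starts = np.array([0, 2, 5])  # each entry marks the start of a group

print(reduceat(values, starts, np.add))      # [ 3. 12.  6.]  via np.add.reduceat
print(reduceat(values, starts, np.nanmean))  # [1.5  4.   6.]  via the fallback loop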
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.

Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
    The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
    The time interval for the binned time series - this is either a scalar
    value (in which case all time bins will be assumed to have the same
    duration) or as an array of values (in which case each time bin can have
    a different duration). If this argument is provided, ``time_bin_end``
    should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
    The start time for the binned time series - this can be either given
    directly as a `~astropy.time.Time` array or as any iterable that
    initializes the `~astropy.time.Time` class. This can also be a scalar
    value if ``time_bin_size`` or ``time_bin_end`` is provided. Defaults to
    the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
    The times of the end of each bin - this can be either given directly as
    a `~astropy.time.Time` array or as any iterable that initializes the
    `~astropy.time.Time` class. This can only be given if ``time_bin_start``
    is provided or its default is used. If ``time_bin_end`` is scalar and
    ``time_bin_start`` is an array, time bins are assumed to be contiguous;
    the end of each bin is the start of the next one, and ``time_bin_end``
    gives the end time for the last bin. If ``time_bin_end`` is an array and
    ``time_bin_start`` is scalar, bins will be contiguous. If both
    ``time_bin_end`` and ``time_bin_start`` are arrays, bins do not need to
    be contiguous. If this argument is provided, ``time_bin_size`` should
    not be provided.
n_bins : int, optional
    The number of bins to use. Defaults to the number needed to fit all the
    original points. If both ``time_bin_start`` and ``time_bin_size`` are
    provided and are scalar values, this determines the total bins within
    that interval. If ``time_bin_start`` is an iterable, this parameter will
    be ignored.
aggregate_func : callable, optional
    The function to use for combining points in the same bin. Defaults to
    np.nanmean.

Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
    The downsampled time series.
def aggregate_downsample(
    time_series,
    *,
    time_bin_size=None,
    time_bin_start=None,
    time_bin_end=None,
    n_bins=None,
    aggregate_func=None,
):
    """
    Downsample a time series by binning values into bins with a fixed size or
    custom sizes, using a single function to combine the values in the bin.

    Parameters
    ----------
    time_series : :class:`~astropy.timeseries.TimeSeries`
        The time series to downsample.
    time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
        The time interval for the binned time series - this is either a
        scalar value (in which case all time bins will be assumed to have
        the same duration) or as an array of values (in which case each time
        bin can have a different duration). If this argument is provided,
        ``time_bin_end`` should not be provided.
    time_bin_start : `~astropy.time.Time` or iterable, optional
        The start time for the binned time series - this can be either given
        directly as a `~astropy.time.Time` array or as any iterable that
        initializes the `~astropy.time.Time` class. This can also be a
        scalar value if ``time_bin_size`` or ``time_bin_end`` is provided.
        Defaults to the first time in the sampled time series.
    time_bin_end : `~astropy.time.Time` or iterable, optional
        The times of the end of each bin - this can be either given directly
        as a `~astropy.time.Time` array or as any iterable that initializes
        the `~astropy.time.Time` class. This can only be given if
        ``time_bin_start`` is provided or its default is used. If
        ``time_bin_end`` is scalar and ``time_bin_start`` is an array, time
        bins are assumed to be contiguous; the end of each bin is the start
        of the next one, and ``time_bin_end`` gives the end time for the
        last bin. If ``time_bin_end`` is an array and ``time_bin_start`` is
        scalar, bins will be contiguous. If both ``time_bin_end`` and
        ``time_bin_start`` are arrays, bins do not need to be contiguous.
        If this argument is provided, ``time_bin_size`` should not be
        provided.
    n_bins : int, optional
        The number of bins to use. Defaults to the number needed to fit all
        the original points. If both ``time_bin_start`` and
        ``time_bin_size`` are provided and are scalar values, this
        determines the total bins within that interval. If
        ``time_bin_start`` is an iterable, this parameter will be ignored.
    aggregate_func : callable, optional
        The function to use for combining points in the same bin. Defaults
        to np.nanmean.

    Returns
    -------
    binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
        The downsampled time series.
    """
    if not isinstance(time_series, TimeSeries):
        raise TypeError("time_series should be a TimeSeries")

    if time_bin_size is not None and not isinstance(
        time_bin_size, (u.Quantity, TimeDelta)
    ):
        raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")

    if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)):
        time_bin_start = Time(time_bin_start)

    if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
        time_bin_end = Time(time_bin_end)

    # Use the table sorted by time
    ts_sorted = time_series.iloc[:]

    # If start time is not provided, it is assumed to be the start of the timeseries
    if time_bin_start is None:
        time_bin_start = ts_sorted.time[0]

    # Total duration of the timeseries is needed for determining either
    # `time_bin_size` or `nbins` in the case of scalar `time_bin_start`
    if time_bin_start.isscalar:
        time_duration = (ts_sorted.time[-1] - time_bin_start).sec

    if time_bin_size is None and time_bin_end is None:
        if time_bin_start.isscalar:
            if n_bins is None:
                raise TypeError(
                    "With single 'time_bin_start' either 'n_bins', "
                    "'time_bin_size' or 'time_bin_end' must be provided"
                )
            else:
                # `nbins` defaults to the number needed to fit all points
                time_bin_size = time_duration / n_bins * u.s
        else:
            time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1])

    if time_bin_start.isscalar:
        if time_bin_size is not None:
            if time_bin_size.isscalar:
                # Determine the number of bins
                if n_bins is None:
                    bin_size_sec = time_bin_size.to_value(u.s)
                    n_bins = int(np.ceil(time_duration / bin_size_sec))
        elif time_bin_end is not None:
            if not time_bin_end.isscalar:
                # Convert start time to an array and populate using `time_bin_end`
                scalar_start_time = time_bin_start
                time_bin_start = time_bin_end.replicate(copy=True)
                time_bin_start[0] = scalar_start_time
                time_bin_start[1:] = time_bin_end[:-1]

    # Check for overlapping bins, and warn if they are present
    if time_bin_end is not None:
        if (
            not time_bin_end.isscalar
            and not time_bin_start.isscalar
            and np.any(time_bin_start[1:] < time_bin_end[:-1])
        ):
            warnings.warn(
                "Overlapping bins should be avoided since they "
                "can lead to double-counting of data during binning.",
                AstropyUserWarning,
            )

    binned = BinnedTimeSeries(
        time_bin_size=time_bin_size,
        time_bin_start=time_bin_start,
        time_bin_end=time_bin_end,
        n_bins=n_bins,
    )

    if aggregate_func is None:
        aggregate_func = np.nanmean

    # Start and end times of the binned timeseries
    bin_start = binned.time_bin_start
    bin_end = binned.time_bin_end

    # Set `n_bins` to match the length of `time_bin_start` if
    # `n_bins` is unspecified or if `time_bin_start` is an iterable
    if n_bins is None or not time_bin_start.isscalar:
        n_bins = len(bin_start)

    # Find the subset of the table that is inside the union of all bins
    # - output: `keep` a mask to create the subset
    # - use relative time in seconds (as `np.longdouble`) in creating `keep`
    #   to speed up (`Time` object comparison is rather slow)
    # - tiny sacrifice on precision (< 0.01ns on 64 bit platform)
    rel_base = ts_sorted.time[0]
    rel_bin_start = _to_relative_longdouble(bin_start, rel_base)
    rel_bin_end = _to_relative_longdouble(bin_end, rel_base)
    rel_ts_sorted_time = _to_relative_longdouble(ts_sorted.time, rel_base)
    keep = (rel_ts_sorted_time >= rel_bin_start[0]) & (
        rel_ts_sorted_time <= rel_bin_end[-1]
    )

    # Find out indices to be removed because of noncontiguous bins
    #
    # Only need to check when adjacent bins have gaps, i.e.,
    # bin_start[ind + 1] > bin_end[ind]
    # - see: https://github.com/astropy/astropy/issues/13058#issuecomment-1090846697
    #   on thoughts on how to reduce the number of times to loop
    noncontiguous_bins_indices = np.where(rel_bin_start[1:] > rel_bin_end[:-1])[0]
    for ind in noncontiguous_bins_indices:
        delete_indices = np.where(
            np.logical_and(
                rel_ts_sorted_time > rel_bin_end[ind],
                rel_ts_sorted_time < rel_bin_start[ind + 1],
            )
        )
        keep[delete_indices] = False

    rel_subset_time = rel_ts_sorted_time[keep]

    # Figure out which bin each row falls in by sorting with respect
    # to the bin end times
    indices = np.searchsorted(rel_bin_end, rel_subset_time)

    # For time == bin_start[i+1] == bin_end[i], let bin_start take precedence
    if len(indices) and np.all(rel_bin_start[1:] >= rel_bin_end[:-1]):
        indices_start = np.searchsorted(
            rel_subset_time, rel_bin_start[rel_bin_start <= rel_ts_sorted_time[-1]]
        )
        indices[indices_start] = np.arange(len(indices_start))

    # Determine rows where values are defined
    if len(indices):
        groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
    else:
        groups = np.array([])

    # Find unique indices to determine which rows in the final time series
    # will not be empty.
    unique_indices = np.unique(indices)

    # Add back columns
    subset = ts_sorted[keep]
    for colname in subset.colnames:
        if colname == "time":
            continue

        values = subset[colname]

        # FIXME: figure out how to avoid the following, if possible
        if not isinstance(values, (np.ndarray, u.Quantity)):
            warnings.warn(
                f"Skipping column {colname} since it has a mix-in type",
                AstropyUserWarning,
            )
            continue

        if isinstance(values, u.Quantity):
            data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
            data[unique_indices] = u.Quantity(
                reduceat(values.value, groups, aggregate_func), values.unit, copy=False
            )
        else:
            data = np.ma.zeros(n_bins, dtype=values.dtype)
            data.mask = 1
            data[unique_indices] = reduceat(values, groups, aggregate_func)
            data.mask[unique_indices] = 0

        binned[colname] = data

    return binned
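A minimal, self-contained usage sketch (the times and flux values are invented for illustration): six one-minute samples binned into three two-minute bins with the default np.nanmean aggregator.

import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.timeseries import TimeSeries

ts = TimeSeries(
    time=Time("2021-01-01") + np.arange(6) * u.min,
    data={"flux": [1, 2, 3, 4, 5, 6] * u.mJy},
)
binned = aggregate_downsample(ts, time_bin_size=2 * u.min)
print(binned["flux"])  # roughly [1.5, 3.5, 5.5] mJy, one mean value per bin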
This serves as the FITS reader for KEPLER or TESS files within
astropy-timeseries.

This function should generally not be called directly, and instead this
time series reader should be accessed with the
:meth:`~astropy.timeseries.TimeSeries.read` method::

    >>> from astropy.timeseries import TimeSeries
    >>> ts = TimeSeries.read('kplr33122.fits', format='kepler.fits')  # doctest: +SKIP

Parameters
----------
filename : `str` or `pathlib.Path`
    File to load.
unit_parse_strict : str, optional
    Behaviour when encountering invalid column units in the FITS header.
    Default is "warn", which will emit a ``UnitsWarning`` and create a
    :class:`~astropy.units.core.UnrecognizedUnit`.
    Values are the ones allowed by the ``parse_strict`` argument of
    :class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.

Returns
-------
ts : `~astropy.timeseries.TimeSeries`
    Data converted into a TimeSeries.
def kepler_fits_reader(filename, unit_parse_strict="warn"):
    """
    This serves as the FITS reader for KEPLER or TESS files within
    astropy-timeseries.

    This function should generally not be called directly, and instead this
    time series reader should be accessed with the
    :meth:`~astropy.timeseries.TimeSeries.read` method::

        >>> from astropy.timeseries import TimeSeries
        >>> ts = TimeSeries.read('kplr33122.fits', format='kepler.fits')  # doctest: +SKIP

    Parameters
    ----------
    filename : `str` or `pathlib.Path`
        File to load.
    unit_parse_strict : str, optional
        Behaviour when encountering invalid column units in the FITS header.
        Default is "warn", which will emit a ``UnitsWarning`` and create a
        :class:`~astropy.units.core.UnrecognizedUnit`.
        Values are the ones allowed by the ``parse_strict`` argument of
        :class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.

    Returns
    -------
    ts : `~astropy.timeseries.TimeSeries`
        Data converted into a TimeSeries.
    """
    hdulist = fits.open(filename)
    # Get the lightcurve HDU
    telescope = hdulist[0].header["telescop"].lower()

    if telescope == "tess":
        hdu = hdulist["LIGHTCURVE"]
    elif telescope == "kepler":
        hdu = hdulist[1]
    else:
        raise NotImplementedError(
            f"{hdulist[0].header['telescop']} is not implemented, only KEPLER or TESS"
            " are supported through this reader"
        )

    if hdu.header["EXTVER"] > 1:
        raise NotImplementedError(
            f"Support for {hdu.header['TELESCOP']} v{hdu.header['EXTVER']} files not"
            " yet implemented"
        )

    # Check time scale
    if hdu.header["TIMESYS"] != "TDB":
        raise NotImplementedError(
            f"Support for {hdu.header['TIMESYS']} time scale not yet implemented in"
            f" {hdu.header['TELESCOP']} reader"
        )

    tab = Table.read(hdu, format="fits", unit_parse_strict=unit_parse_strict)

    # Some KEPLER files have a T column instead of TIME.
    if "T" in tab.colnames:
        tab.rename_column("T", "TIME")

    for colname in tab.colnames:
        unit = tab[colname].unit
        # Make masks nan for any column which will turn into a Quantity
        # later. TODO: remove once we support Masked Quantities properly?
        if unit and isinstance(tab[colname], MaskedColumn):
            tab[colname] = tab[colname].filled(np.nan)
        # Fix units
        if unit == "e-/s":
            tab[colname].unit = "electron/s"
        if unit == "pixels":
            tab[colname].unit = "pixel"

        # Rename columns to lowercase
        tab.rename_column(colname, colname.lower())

    # Filter out NaN rows
    nans = np.isnan(tab["time"].data)
    if np.any(nans):
        warnings.warn(f"Ignoring {np.sum(nans)} rows with NaN times")
    tab = tab[~nans]

    # Time column is dependent on source and we correct it here
    reference_date = Time(
        hdu.header["BJDREFI"],
        hdu.header["BJDREFF"],
        scale=hdu.header["TIMESYS"].lower(),
        format="jd",
    )
    time = reference_date + TimeDelta(tab["time"].data, format="jd")
    time.format = "isot"

    # Remove original time column
    tab.remove_column("time")

    hdulist.close()

    return TimeSeries(time=time, data=tab)
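For reference, the reader is normally reached through the registered 'kepler.fits' and 'tess.fits' formats of the unified I/O interface rather than called directly; the filename below is a placeholder, so the snippet only runs against a real light-curve product.

from astropy.timeseries import TimeSeries

# "tess2019_lc.fits" is a placeholder name, not a bundled data file;
# unit_parse_strict is forwarded to this reader.
ts = TimeSeries.read("tess2019_lc.fits", format="tess.fits", unit_parse_strict="silent")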
Compute the periodogram using a brute force reference method.

Parameters
----------
t : array-like
    Sequence of observation times.
y : array-like
    Sequence of observations associated with times t.
ivar : array-like
    The inverse variance of ``y``.
period : array-like
    The trial periods where the periodogram should be computed.
duration : array-like
    The durations that should be tested.
oversample :
    The resolution of the phase grid in units of durations.
use_likelihood : bool
    If true, maximize the log likelihood over phase, duration, and depth.

Returns
-------
power : array-like
    The periodogram evaluated at the periods in ``period``.
depth : array-like
    The estimated depth of the maximum power model at each period.
depth_err : array-like
    The 1-sigma uncertainty on ``depth``.
duration : array-like
    The maximum power duration at each period.
transit_time : array-like
    The maximum power phase of the transit in units of time. This indicates
    the mid-transit time and it will always be in the range (0, period).
depth_snr : array-like
    The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
    The log likelihood of the maximum power model.
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
    """Compute the periodogram using a brute force reference method.

    Parameters
    ----------
    t : array-like
        Sequence of observation times.
    y : array-like
        Sequence of observations associated with times t.
    ivar : array-like
        The inverse variance of ``y``.
    period : array-like
        The trial periods where the periodogram should be computed.
    duration : array-like
        The durations that should be tested.
    oversample :
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.

    Returns
    -------
    power : array-like
        The periodogram evaluated at the periods in ``period``.
    depth : array-like
        The estimated depth of the maximum power model at each period.
    depth_err : array-like
        The 1-sigma uncertainty on ``depth``.
    duration : array-like
        The maximum power duration at each period.
    transit_time : array-like
        The maximum power phase of the transit in units of time. This
        indicates the mid-transit time and it will always be in the range
        (0, period).
    depth_snr : array-like
        The signal-to-noise with which the depth is measured at maximum power.
    log_likelihood : array-like
        The log likelihood of the maximum power model.
    """
    f = partial(_bls_slow_one, t, y, ivar, duration, oversample, use_likelihood)
    return _apply(f, period)
Compute the periodogram using an optimized Cython implementation.

Parameters
----------
t : array-like
    Sequence of observation times.
y : array-like
    Sequence of observations associated with times t.
ivar : array-like
    The inverse variance of ``y``.
period : array-like
    The trial periods where the periodogram should be computed.
duration : array-like
    The durations that should be tested.
oversample :
    The resolution of the phase grid in units of durations.
use_likelihood : bool
    If true, maximize the log likelihood over phase, duration, and depth.

Returns
-------
power : array-like
    The periodogram evaluated at the periods in ``period``.
depth : array-like
    The estimated depth of the maximum power model at each period.
depth_err : array-like
    The 1-sigma uncertainty on ``depth``.
duration : array-like
    The maximum power duration at each period.
transit_time : array-like
    The maximum power phase of the transit in units of time. This indicates
    the mid-transit time and it will always be in the range (0, period).
depth_snr : array-like
    The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
    The log likelihood of the maximum power model.
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
    """Compute the periodogram using an optimized Cython implementation.

    Parameters
    ----------
    t : array-like
        Sequence of observation times.
    y : array-like
        Sequence of observations associated with times t.
    ivar : array-like
        The inverse variance of ``y``.
    period : array-like
        The trial periods where the periodogram should be computed.
    duration : array-like
        The durations that should be tested.
    oversample :
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.

    Returns
    -------
    power : array-like
        The periodogram evaluated at the periods in ``period``.
    depth : array-like
        The estimated depth of the maximum power model at each period.
    depth_err : array-like
        The 1-sigma uncertainty on ``depth``.
    duration : array-like
        The maximum power duration at each period.
    transit_time : array-like
        The maximum power phase of the transit in units of time. This
        indicates the mid-transit time and it will always be in the range
        (0, period).
    depth_snr : array-like
        The signal-to-noise with which the depth is measured at maximum power.
    log_likelihood : array-like
        The log likelihood of the maximum power model.
    """
    return bls_impl(t, y, ivar, period, duration, oversample, use_likelihood)
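These low-level kernels are normally driven through the BoxLeastSquares interface rather than called directly; a hedged sketch of the equivalent high-level call follows (the synthetic transit parameters and the period grid are invented for the example).

import numpy as np
import astropy.units as u
from astropy.timeseries import BoxLeastSquares

rng = np.random.default_rng(42)
t = np.sort(rng.uniform(0, 20, 500)) * u.day
y = np.ones(len(t)) + 1e-3 * rng.standard_normal(len(t))
# Inject a toy 0.5% deep transit with a 3-day period and 0.2-day duration.
in_transit = (t.value % 3.0) < 0.2
y[in_transit] -= 0.005

bls = BoxLeastSquares(t, y)
result = bls.power(np.linspace(1, 5, 500) * u.day, 0.2 * u.day, method="fast")
print(result.period[np.argmax(result.power)])  # should be close to 3 days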