Regression test for #6264: make sure that asymmetric convolution functions go the right direction
def test_asymmetric_kernel(boundary):
    """
    Regression test for #6264: make sure that asymmetric convolution
    functions go the right direction
    """

    x = np.array([3.0, 0.0, 1.0], dtype=">f8")
    y = np.array([1, 2, 3], dtype=">f8")

    z = convolve(x, y, boundary=boundary, normalize_kernel=False)

    if boundary == "fill":
        assert_array_almost_equal_nulp(
            z, np.array([6.0, 10.0, 2.0], dtype="float"), 10
        )
    elif boundary is None:
        assert_array_almost_equal_nulp(
            z, np.array([0.0, 10.0, 0.0], dtype="float"), 10
        )
    elif boundary == "extend":
        assert_array_almost_equal_nulp(
            z, np.array([15.0, 10.0, 3.0], dtype="float"), 10
        )
    elif boundary == "wrap":
        assert_array_almost_equal_nulp(
            z, np.array([9.0, 10.0, 5.0], dtype="float"), 10
        )
Issue #9168 pointed out that kernels can be (unitless) quantities, which leads to crashes when inplace modifications are made to arrays in convolve/convolve_fft, so we now strip the quantity aspects off of kernels.
def test_regressiontest_issue9168():
    """
    Issue #9168 pointed out that kernels can be (unitless) quantities, which
    leads to crashes when inplace modifications are made to arrays in
    convolve/convolve_fft, so we now strip the quantity aspects off of kernels.
    """

    x = np.array(
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
    )

    kernel_fwhm = 1 * u.arcsec
    pixel_size = 1 * u.arcsec

    kernel = Gaussian2DKernel(x_stddev=kernel_fwhm / pixel_size)

    convolve_fft(x, kernel, boundary="fill", fill_value=np.nan, preserve_nan=True)
    convolve(x, kernel, boundary="fill", fill_value=np.nan, preserve_nan=True)
Assert arrays are close to within expected floating point rounding. Check that the result is correct at the precision expected for 64 bit numbers, taking account that the tolerance has to reflect that all powers in the FFTs enter our values.
def assert_floatclose(x, y):
    """Assert arrays are close to within expected floating point rounding.

    Check that the result is correct at the precision expected for 64 bit
    numbers, taking account that the tolerance has to reflect that all powers
    in the FFTs enter our values.
    """
    # The number used is set by the fact that the Windows FFT sometimes
    # returns an answer that is EXACTLY 10*np.spacing.
    assert_allclose(x, y, atol=10 * np.spacing(x.max()), rtol=0.0)
Make sure that asymmetric convolution functions go the right direction
def test_asymmetric_kernel(boundary):
    """
    Make sure that asymmetric convolution functions go the right direction
    """

    x = np.array([3.0, 0.0, 1.0], dtype=">f8")
    y = np.array([1, 2, 3], dtype=">f8")

    with expected_boundary_warning(boundary=boundary):
        z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False)

    if boundary in (None, "fill"):
        assert_array_almost_equal_nulp(
            z, np.array([6.0, 10.0, 2.0], dtype="float"), 10
        )
    elif boundary == "wrap":
        assert_array_almost_equal_nulp(
            z, np.array([9.0, 10.0, 5.0], dtype="float"), 10
        )
Test that convolve_fft works correctly when inputs are lists
def test_input_unmodified(
    boundary, nan_treatment, normalize_kernel, preserve_nan, dtype
):
    """
    Test that convolve_fft works correctly when inputs are lists
    """
    array = [1.0, 4.0, 5.0, 6.0, 5.0, 7.0, 8.0]
    kernel = [0.2, 0.6, 0.2]
    x = np.array(array, dtype=dtype)
    y = np.array(kernel, dtype=dtype)

    # Make pseudoimmutable
    x.flags.writeable = False
    y.flags.writeable = False

    with expected_boundary_warning(boundary=boundary):
        convolve_fft(
            x,
            y,
            boundary=boundary,
            nan_treatment=nan_treatment,
            normalize_kernel=normalize_kernel,
            preserve_nan=preserve_nan,
        )

    assert np.all(np.array(array, dtype=dtype) == x)
    assert np.all(np.array(kernel, dtype=dtype) == y)
Test that convolve_fft doesn't modify the input data
def test_input_unmodified_with_nan(
    boundary, nan_treatment, normalize_kernel, preserve_nan, dtype
):
    """
    Test that convolve_fft doesn't modify the input data
    """
    array = [1.0, 4.0, 5.0, np.nan, 5.0, 7.0, 8.0]
    kernel = [0.2, 0.6, 0.2]
    x = np.array(array, dtype=dtype)
    y = np.array(kernel, dtype=dtype)

    # Make pseudoimmutable
    x.flags.writeable = False
    y.flags.writeable = False

    # make copies for post call comparison
    x_copy = x.copy()
    y_copy = y.copy()

    with expected_boundary_warning(boundary=boundary):
        convolve_fft(
            x,
            y,
            boundary=boundary,
            nan_treatment=nan_treatment,
            normalize_kernel=normalize_kernel,
            preserve_nan=preserve_nan,
        )

    # ( NaN == NaN ) = False
    # Only compare non NaN values for canonical equivalence
    # and then check NaN explicitly with np.isnan()
    array_is_nan = np.isnan(array)
    kernel_is_nan = np.isnan(kernel)
    array_not_nan = ~array_is_nan
    kernel_not_nan = ~kernel_is_nan
    assert np.all(x_copy[array_not_nan] == x[array_not_nan])
    assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
    assert np.all(np.isnan(x[array_is_nan]))
    assert np.all(np.isnan(y[kernel_is_nan]))
Test if the sum of all pixels corresponds nearly to the integral.
def test_pixel_sum_1D(model_class, mode):
    """
    Test if the sum of all pixels corresponds nearly to the integral.
    """
    if model_class == Box1D and mode == "center":
        pytest.skip("Non integrating mode. Skip integral test.")
    parameters = models_1D[model_class]
    model = create_model(model_class, parameters)

    values = discretize_model(model, models_1D[model_class]["x_lim"], mode=mode)
    assert_allclose(values.sum(), models_1D[model_class]["integral"], atol=0.0001)
Discretize Gaussian with different modes and check if result is at least similar to Gaussian1D.eval().
def test_gaussian_eval_1D(mode):
    """
    Discretize Gaussian with different modes and check
    if result is at least similar to Gaussian1D.eval().
    """
    model = Gaussian1D(1, 0, 20)
    x = np.arange(-100, 101)
    values = model(x)
    disc_values = discretize_model(model, (-100, 101), mode=mode)

    assert_allclose(values, disc_values, atol=0.001)
Test if the sum of all pixels corresponds nearly to the integral.
def test_pixel_sum_2D(model_class, mode):
    """
    Test if the sum of all pixels corresponds nearly to the integral.
    """
    if model_class == Box2D and mode == "center":
        pytest.skip("Non integrating mode. Skip integral test.")

    parameters = models_2D[model_class]
    model = create_model(model_class, parameters)

    values = discretize_model(
        model,
        models_2D[model_class]["x_lim"],
        models_2D[model_class]["y_lim"],
        mode=mode,
    )
    assert_allclose(values.sum(), models_2D[model_class]["integral"], atol=0.0001)
Test if the sum of all pixels of a compound model corresponds nearly to the integral.
def test_pixel_sum_compound_2D(model_class, mode):
    """
    Test if the sum of all pixels of a compound model corresponds nearly to
    the integral.
    """
    if model_class == Box2D and mode == "center":
        pytest.skip("Non integrating mode. Skip integral test.")

    parameters = models_2D[model_class]
    model = create_model(model_class, parameters)

    values = discretize_model(
        model + model,
        models_2D[model_class]["x_lim"],
        models_2D[model_class]["y_lim"],
        mode=mode,
    )

    model_integral = 2 * models_2D[model_class]["integral"]
    assert_allclose(values.sum(), model_integral, atol=0.0001)
Discretize Gaussian with different modes and check if result is at least similar to Gaussian2D.eval()
def test_gaussian_eval_2D(mode):
    """
    Discretize Gaussian with different modes and check
    if result is at least similar to Gaussian2D.eval()
    """
    model = Gaussian2D(0.01, 0, 0, 1, 1)

    x = np.arange(-2, 3)
    y = np.arange(-2, 3)

    x, y = np.meshgrid(x, y)

    values = model(x, y)
    disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode)

    assert_allclose(values, disc_values, atol=1e-2)
Discretize Gaussian with integrate mode
def test_gaussian_eval_2D_integrate_mode():
    """
    Discretize Gaussian with integrate mode
    """
    model_list = [
        Gaussian2D(0.01, 0, 0, 2, 2),
        Gaussian2D(0.01, 0, 0, 1, 2),
        Gaussian2D(0.01, 0, 0, 2, 1),
    ]

    x = np.arange(-2, 3)
    y = np.arange(-2, 3)

    x, y = np.meshgrid(x, y)

    for model in model_list:
        values = model(x, y)
        disc_values = discretize_model(model, (-2, 3), (-2, 3), mode="integrate")
        assert_allclose(values, disc_values, atol=1e-2)
Test subpixel accuracy of the integrate mode with gaussian 1D model.
def test_subpixel_gauss_1D():
    """
    Test subpixel accuracy of the integrate mode with gaussian 1D model.
    """
    gauss_1D = Gaussian1D(1, 0, 0.1)
    values = discretize_model(gauss_1D, (-1, 2), mode="integrate", factor=100)
    assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001)
Test subpixel accuracy of the integrate mode with gaussian 2D model.
def test_subpixel_gauss_2D():
    """
    Test subpixel accuracy of the integrate mode with gaussian 2D model.
    """
    gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1)
    values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode="integrate", factor=100)
    assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001)
Test discretize when a 1d function is passed.
def test_discretize_callable_1d():
    """
    Test discretize when a 1d function is passed.
    """

    def f(x):
        return x**2

    y = discretize_model(f, (-5, 6))
    assert_allclose(y, np.arange(-5, 6) ** 2)
Test discretize when a 2d function is passed.
def test_discretize_callable_2d():
    """
    Test discretize when a 2d function is passed.
    """

    def f(x, y):
        return x**2 + y**2

    actual = discretize_model(f, (-5, 6), (-5, 6))
    y, x = np.indices((11, 11)) - 5
    desired = x**2 + y**2
    assert_allclose(actual, desired)
Test type exception.
def test_type_exception():
    """
    Test type exception.
    """
    with pytest.raises(TypeError, match=r"Model must be callable\."):
        discretize_model(float(0), (-10, 11))
Test dimension exception 1d.
def test_dim_exception_1d():
    """
    Test dimension exception 1d.
    """

    def f(x):
        return x**2

    with pytest.raises(
        ValueError, match=r"y_range should not be input for a 1D model"
    ):
        discretize_model(f, (-10, 11), (-10, 11))
Test dimension exception 2d.
def test_dim_exception_2d():
    """
    Test dimension exception 2d.
    """

    def f(x, y):
        return x**2 + y**2

    with pytest.raises(
        ValueError, match=r"y_range must be specified for a 2D model"
    ):
        discretize_model(f, (-10, 11))
Return a valid representation class from ``value`` or raise exception.
def _get_repr_cls(value):
    """
    Return a valid representation class from ``value`` or raise exception.
    """
    if value in r.REPRESENTATION_CLASSES:
        value = r.REPRESENTATION_CLASSES[value]
    elif not isinstance(value, type) or not issubclass(value, r.BaseRepresentation):
        raise ValueError(
            f"Representation is {value!r} but must be a BaseRepresentation class "
            f"or one of the string aliases {list(r.REPRESENTATION_CLASSES)}"
        )
    return value
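For context, a minimal usage sketch of the alias registry this helper consults (the import path is illustrative; ``REPRESENTATION_CLASSES`` is the same mapping referenced in the code above, keyed by string aliases such as "cartesian" or "spherical"):

import astropy.coordinates.representation as r

# The registry maps string aliases to representation classes, so the helper
# resolves "spherical" to SphericalRepresentation, returns a representation
# class unchanged, and raises ValueError for anything else.
print(r.REPRESENTATION_CLASSES["spherical"])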
Return a valid differential class from ``value`` or raise exception. As originally created, this is only used in the SkyCoord initializer, so if that is refactored, this function may no longer be necessary.
def _get_diff_cls(value):
    """
    Return a valid differential class from ``value`` or raise exception.

    As originally created, this is only used in the SkyCoord initializer, so
    if that is refactored, this function may no longer be necessary.
    """
    if value in r.DIFFERENTIAL_CLASSES:
        value = r.DIFFERENTIAL_CLASSES[value]
    elif not isinstance(value, type) or not issubclass(value, r.BaseDifferential):
        raise ValueError(
            f"Differential is {value!r} but must be a BaseDifferential class "
            f"or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
        )
    return value
Get valid representation and differential classes. Parameters ---------- base : str or `~astropy.coordinates.BaseRepresentation` subclass class for the representation of the base coordinates. If a string, it is looked up among the known representation classes. **differentials : dict of str or `~astropy.coordinates.BaseDifferentials` Keys are like for normal differentials, i.e., 's' for a first derivative in time, etc. If an item is set to `None`, it will be guessed from the base class. Returns ------- repr_classes : dict of subclasses The base class is keyed by 'base'; the others by the keys of ``differentials``.
def _get_repr_classes(base, **differentials):
    """Get valid representation and differential classes.

    Parameters
    ----------
    base : str or `~astropy.coordinates.BaseRepresentation` subclass
        class for the representation of the base coordinates.  If a string,
        it is looked up among the known representation classes.
    **differentials : dict of str or `~astropy.coordinates.BaseDifferentials`
        Keys are like for normal differentials, i.e., 's' for a first
        derivative in time, etc.  If an item is set to `None`, it will be
        guessed from the base class.

    Returns
    -------
    repr_classes : dict of subclasses
        The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
    """
    base = _get_repr_cls(base)
    repr_classes = {"base": base}

    for name, differential_type in differentials.items():
        if differential_type == "base":
            # We don't want to fail for this case.
            differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)

        elif differential_type in r.DIFFERENTIAL_CLASSES:
            differential_type = r.DIFFERENTIAL_CLASSES[differential_type]

        elif differential_type is not None and (
            not isinstance(differential_type, type)
            or not issubclass(differential_type, r.BaseDifferential)
        ):
            raise ValueError(
                f"Differential is {differential_type!r} but must be a BaseDifferential"
                f" class or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
            )
        repr_classes[name] = differential_type
    return repr_classes
Enter your birthday as an `astropy.time.Time` object and receive a mystical horoscope about things to come. Parameters ---------- birthday : `astropy.time.Time` or str Your birthday as a `datetime.datetime` or `astropy.time.Time` object or "YYYY-MM-DD" string. corrected : bool Whether to account for the precession of the Earth instead of using the ancient Greek dates for the signs. After all, you do want your *real* horoscope, not a cheap inaccurate approximation, right? chinese : bool Chinese annual zodiac wisdom instead of Western one. Returns ------- Infinite wisdom, condensed into astrologically precise prose. Notes ----- This function was implemented on April 1. Take note of that date.
def horoscope(birthday, corrected=True, chinese=False): """ Enter your birthday as an `astropy.time.Time` object and receive a mystical horoscope about things to come. Parameters ---------- birthday : `astropy.time.Time` or str Your birthday as a `datetime.datetime` or `astropy.time.Time` object or "YYYY-MM-DD"string. corrected : bool Whether to account for the precession of the Earth instead of using the ancient Greek dates for the signs. After all, you do want your *real* horoscope, not a cheap inaccurate approximation, right? chinese : bool Chinese annual zodiac wisdom instead of Western one. Returns ------- Infinite wisdom, condensed into astrologically precise prose. Notes ----- This function was implemented on April 1. Take note of that date. """ from bs4 import BeautifulSoup today = datetime.now() err_msg = "Invalid response from celestial gods (failed to load horoscope)." headers = {"User-Agent": "foo/bar"} special_words = { "([sS]tar[s^ ]*)": "yellow", "([yY]ou[^ ]*)": "magenta", "([pP]lay[^ ]*)": "blue", "([hH]eart)": "red", "([fF]ate)": "lightgreen", } if isinstance(birthday, str): birthday = datetime.strptime(birthday, "%Y-%m-%d") if chinese: # TODO: Make this more accurate by using the actual date, not just year # Might need third-party tool like https://pypi.org/project/lunardate zodiac_sign = _get_zodiac(birthday.year) url = ( "https://www.horoscope.com/us/horoscopes/yearly/" f"{today.year}-chinese-horoscope-{zodiac_sign}.aspx" ) summ_title_sfx = f"in {today.year}" try: res = Request(url, headers=headers) with urlopen(res) as f: try: doc = BeautifulSoup(f, "html.parser") # TODO: Also include Love, Family & Friends, Work, Money, More? item = doc.find(id="overview") desc = item.getText() except Exception: raise CelestialError(err_msg) except Exception: raise CelestialError(err_msg) else: birthday = atime.Time(birthday) if corrected: with warnings.catch_warnings(): warnings.simplefilter("ignore") # Ignore ErfaWarning zodiac_sign = get_sun(birthday).get_constellation().lower() zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign) if zodiac_sign not in _VALID_SIGNS: raise HumanError( f"On your birthday the sun was in {zodiac_sign.title()}, which is" " not a sign of the zodiac. You must not exist. Or maybe you can" " settle for corrected=False." ) else: zodiac_sign = get_sign(birthday.to_datetime()) url = f"https://astrology.com/horoscope/daily/{zodiac_sign}.html" summ_title_sfx = f"on {today.strftime('%Y-%m-%d')}" res = Request(url, headers=headers) with urlopen(res) as f: try: doc = BeautifulSoup(f, "html.parser") item = doc.find("div", {"id": "content"}) desc = item.getText() except Exception: raise CelestialError(err_msg) print("*" * 79) color_print(f"Horoscope for {zodiac_sign.capitalize()} {summ_title_sfx}:", "green") print("*" * 79) for block in textwrap.wrap(desc, 79): split_block = block.split() for i, word in enumerate(split_block): for re_word in special_words.keys(): match = re.search(re_word, word) if match is None: continue split_block[i] = _color_text(match.groups()[0], special_words[re_word]) print(" ".join(split_block))
Eccentricity of the Earth's orbit at the requested Julian Date. Parameters ---------- jd : scalar or array-like Julian date at which to compute the eccentricity Returns ------- eccentricity : scalar or array The eccentricity (or array of eccentricities) References ---------- * Explanatory Supplement to the Astronomical Almanac: P. Kenneth Seidelmann (ed), University Science Books (1992).
def eccentricity(jd):
    """
    Eccentricity of the Earth's orbit at the requested Julian Date.

    Parameters
    ----------
    jd : scalar or array-like
        Julian date at which to compute the eccentricity

    Returns
    -------
    eccentricity : scalar or array
        The eccentricity (or array of eccentricities)

    References
    ----------
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    T = (jd - jd1950) / 36525.0

    p = (-0.000000126, -0.00004193, 0.01673011)

    return np.polyval(p, T)
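A quick numeric sanity check of the polynomial above (a sketch, not from the source; the value used for the module-level ``jd1950`` reference epoch is an assumption made for illustration):

import numpy as np

jd1950 = 2433282.4235                 # assumed: B1950.0 expressed as a Julian date
T = (2451545.0 - jd1950) / 36525.0    # Julian centuries from B1950.0 to J2000.0
# Evaluates to roughly 0.0167, Earth's familiar orbital eccentricity.
print(np.polyval((-0.000000126, -0.00004193, 0.01673011), T))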
Computes the mean longitude of perigee of the Earth's orbit at the requested Julian Date. Parameters ---------- jd : scalar or array-like Julian date at which to compute the mean longitude of perigee Returns ------- mean_lon_of_perigee : scalar or array Mean longitude of perigee in degrees (or array of mean longitudes) References ---------- * Explanatory Supplement to the Astronomical Almanac: P. Kenneth Seidelmann (ed), University Science Books (1992).
def mean_lon_of_perigee(jd):
    """
    Computes the mean longitude of perigee of the Earth's orbit at the
    requested Julian Date.

    Parameters
    ----------
    jd : scalar or array-like
        Julian date at which to compute the mean longitude of perigee

    Returns
    -------
    mean_lon_of_perigee : scalar or array
        Mean longitude of perigee in degrees (or array of mean longitudes)

    References
    ----------
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    T = (jd - jd1950) / 36525.0

    p = (0.012, 1.65, 6190.67, 1015489.951)

    return np.polyval(p, T) / 3600.0
Computes the obliquity of the Earth at the requested Julian Date. Parameters ---------- jd : scalar or array-like Julian date (TT) at which to compute the obliquity algorithm : int Year of algorithm based on IAU adoption. Can be 2006, 2000 or 1980. The IAU 2006 algorithm is based on Hilton et al. 2006. The IAU 1980 algorithm is based on the Explanatory Supplement to the Astronomical Almanac (1992). The IAU 2000 algorithm starts with the IAU 1980 algorithm and applies a precession-rate correction from the IAU 2000 precession model. Returns ------- obliquity : scalar or array Mean obliquity in degrees (or array of obliquities) References ---------- * Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 * Capitaine, N., et al., 2003, Astron.Astrophys. 400, 1145-1154 * Explanatory Supplement to the Astronomical Almanac: P. Kenneth Seidelmann (ed), University Science Books (1992).
def obliquity(jd, algorithm=2006): """ Computes the obliquity of the Earth at the requested Julian Date. Parameters ---------- jd : scalar or array-like Julian date (TT) at which to compute the obliquity algorithm : int Year of algorithm based on IAU adoption. Can be 2006, 2000 or 1980. The IAU 2006 algorithm is based on Hilton et al. 2006. The IAU 1980 algorithm is based on the Explanatory Supplement to the Astronomical Almanac (1992). The IAU 2000 algorithm starts with the IAU 1980 algorithm and applies a precession-rate correction from the IAU 2000 precession model. Returns ------- obliquity : scalar or array Mean obliquity in degrees (or array of obliquities) References ---------- * Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 * Capitaine, N., et al., 2003, Astron.Astrophys. 400, 1145-1154 * Explanatory Supplement to the Astronomical Almanac: P. Kenneth Seidelmann (ed), University Science Books (1992). """ if algorithm == 2006: return np.rad2deg(erfa.obl06(jd, 0)) elif algorithm == 2000: return np.rad2deg(erfa.obl80(jd, 0) + erfa.pr00(jd, 0)[1]) elif algorithm == 1980: return np.rad2deg(erfa.obl80(jd, 0)) else: raise ValueError("invalid algorithm year for computing obliquity")
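The IAU 2006 branch above is a thin wrapper around ERFA; a short check of the value it returns at J2000.0 (``erfa.obl06`` takes a two-part Julian date and returns radians, as the code above uses it):

import erfa
import numpy as np

# Mean obliquity of the ecliptic at J2000.0 (IAU 2006): about 23.4393 degrees.
print(np.rad2deg(erfa.obl06(2451545.0, 0.0)))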
Computes the precession matrix from one Julian epoch to another, per IAU 2006. Parameters ---------- fromepoch : `~astropy.time.Time` The epoch to precess from. toepoch : `~astropy.time.Time` The epoch to precess to. Returns ------- pmatrix : 3x3 array Precession matrix to get from ``fromepoch`` to ``toepoch`` References ---------- Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351
def precession_matrix_Capitaine(fromepoch, toepoch):
    """
    Computes the precession matrix from one Julian epoch to another, per IAU 2006.

    Parameters
    ----------
    fromepoch : `~astropy.time.Time`
        The epoch to precess from.
    toepoch : `~astropy.time.Time`
        The epoch to precess to.

    Returns
    -------
    pmatrix : 3x3 array
        Precession matrix to get from ``fromepoch`` to ``toepoch``

    References
    ----------
    Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351
    """
    # Multiply the two precession matrices (without frame bias) through J2000.0
    fromepoch_to_J2000 = matrix_transpose(erfa.bp06(*get_jd12(fromepoch, "tt"))[1])
    J2000_to_toepoch = erfa.bp06(*get_jd12(toepoch, "tt"))[1]
    return J2000_to_toepoch @ fromepoch_to_J2000
Computes the precession matrix from one Besselian epoch to another using Newcomb's method. ``epoch1`` and ``epoch2`` are in Besselian year numbers.
def _precession_matrix_besselian(epoch1, epoch2):
    """
    Computes the precession matrix from one Besselian epoch to another using
    Newcomb's method.

    ``epoch1`` and ``epoch2`` are in Besselian year numbers.
    """
    # tropical years
    t1 = (epoch1 - 1850.0) / 1000.0
    t2 = (epoch2 - 1850.0) / 1000.0
    dt = t2 - t1

    zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
    zeta2 = 30.240 - 0.27 * t1
    zeta3 = 17.995
    pzeta = (zeta3, zeta2, zeta1, 0)
    zeta = np.polyval(pzeta, dt) / 3600

    z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
    z2 = 109.480 + 0.39 * t1
    z3 = 18.325
    pz = (z3, z2, z1, 0)
    z = np.polyval(pz, dt) / 3600

    theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1
    theta2 = -42.65 - 0.37 * t1
    theta3 = -41.8
    ptheta = (theta3, theta2, theta1, 0)
    theta = np.polyval(ptheta, dt) / 3600

    return (
        rotation_matrix(-z, "z")
        @ rotation_matrix(theta, "y")
        @ rotation_matrix(-zeta, "z")
    )
Computes nutation components following the IAU 2000B specification. Parameters ---------- jd : scalar Julian date (TT) at which to compute the nutation components Returns ------- eps : float epsilon in radians dpsi : float dpsi in radians deps : float depsilon in radians
def nutation_components2000B(jd):
    """
    Computes nutation components following the IAU 2000B specification.

    Parameters
    ----------
    jd : scalar
        Julian date (TT) at which to compute the nutation components

    Returns
    -------
    eps : float
        epsilon in radians
    dpsi : float
        dpsi in radians
    deps : float
        depsilon in radians
    """
    dpsi, deps, epsa, _, _, _, _, _ = erfa.pn00b(jd, 0)
    return epsa, dpsi, deps
Nutation matrix generated from nutation components, IAU 2000B model. Matrix converts from mean coordinate to true coordinate as r_true = M * r_mean Parameters ---------- epoch : `~astropy.time.Time` The epoch at which to compute the nutation matrix Returns ------- nmatrix : 3x3 array Nutation matrix for the specified epoch References ---------- * Explanatory Supplement to the Astronomical Almanac: P. Kenneth Seidelmann (ed), University Science Books (1992).
def nutation_matrix(epoch):
    """
    Nutation matrix generated from nutation components, IAU 2000B model.

    Matrix converts from mean coordinate to true coordinate as
    r_true = M * r_mean

    Parameters
    ----------
    epoch : `~astropy.time.Time`
        The epoch at which to compute the nutation matrix

    Returns
    -------
    nmatrix : 3x3 array
        Nutation matrix for the specified epoch

    References
    ----------
    * Explanatory Supplement to the Astronomical Almanac: P. Kenneth
      Seidelmann (ed), University Science Books (1992).
    """
    # TODO: implement higher precision 2006/2000A model if requested/needed
    return erfa.num00b(*get_jd12(epoch, "tt"))
Converts 3D rectangular cartesian coordinates to spherical polar coordinates. Note that the resulting angles are latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This function simply wraps functionality provided by the `~astropy.coordinates.CartesianRepresentation` and `~astropy.coordinates.SphericalRepresentation` classes. In general, for both performance and readability, we suggest using these classes directly. But for situations where a quick one-off conversion makes sense, this function is provided. Parameters ---------- x : scalar, array-like, or `~astropy.units.Quantity` The first Cartesian coordinate. y : scalar, array-like, or `~astropy.units.Quantity` The second Cartesian coordinate. z : scalar, array-like, or `~astropy.units.Quantity` The third Cartesian coordinate. Returns ------- r : `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : `~astropy.units.Quantity` ['angle'] The latitude in radians lon : `~astropy.units.Quantity` ['angle'] The longitude in radians
def cartesian_to_spherical(x, y, z): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. Note that the resulting angles are latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This function simply wraps functionality provided by the `~astropy.coordinates.CartesianRepresentation` and `~astropy.coordinates.SphericalRepresentation` classes. In general, for both performance and readability, we suggest using these classes directly. But for situations where a quick one-off conversion makes sense, this function is provided. Parameters ---------- x : scalar, array-like, or `~astropy.units.Quantity` The first Cartesian coordinate. y : scalar, array-like, or `~astropy.units.Quantity` The second Cartesian coordinate. z : scalar, array-like, or `~astropy.units.Quantity` The third Cartesian coordinate. Returns ------- r : `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : `~astropy.units.Quantity` ['angle'] The latitude in radians lon : `~astropy.units.Quantity` ['angle'] The longitude in radians """ if not hasattr(x, "unit"): x = x * u.dimensionless_unscaled if not hasattr(y, "unit"): y = y * u.dimensionless_unscaled if not hasattr(z, "unit"): z = z * u.dimensionless_unscaled cart = CartesianRepresentation(x, y, z) sph = cart.represent_as(SphericalRepresentation) return sph.distance, sph.lat, sph.lon
Converts spherical polar coordinates to rectangular cartesian coordinates. Note that the input angles should be in latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This is a low-level function used internally in `astropy.coordinates`. It is provided for users if they really want to use it, but it is recommended that you use the `astropy.coordinates` coordinate systems. Parameters ---------- r : scalar, array-like, or `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The latitude (in radians if array or scalar) lon : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The longitude (in radians if array or scalar) Returns ------- x : float or array The first cartesian coordinate. y : float or array The second cartesian coordinate. z : float or array The third cartesian coordinate.
def spherical_to_cartesian(r, lat, lon): """ Converts spherical polar coordinates to rectangular cartesian coordinates. Note that the input angles should be in latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This is a low-level function used internally in `astropy.coordinates`. It is provided for users if they really want to use it, but it is recommended that you use the `astropy.coordinates` coordinate systems. Parameters ---------- r : scalar, array-like, or `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The latitude (in radians if array or scalar) lon : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The longitude (in radians if array or scalar) Returns ------- x : float or array The first cartesian coordinate. y : float or array The second cartesian coordinate. z : float or array The third cartesian coordinate. """ if not hasattr(r, "unit"): r = r * u.dimensionless_unscaled if not hasattr(lat, "unit"): lat = lat * u.radian if not hasattr(lon, "unit"): lon = lon * u.radian sph = SphericalRepresentation(distance=r, lat=lat, lon=lon) cart = sph.represent_as(CartesianRepresentation) return cart.x, cart.y, cart.z
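A short round-trip check of the two conversion helpers above, using their public ``astropy.coordinates`` entry points (the printed result is the expected outcome of the round trip):

import numpy as np
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian

# (x, y, z) = (0, 1, 0) lies on the equator at longitude 90 degrees.
r_, lat, lon = cartesian_to_spherical(0.0, 1.0, 0.0)   # r = 1, lat = 0 rad, lon = pi/2 rad

# Converting back recovers the original Cartesian components (up to rounding).
x, y, z = spherical_to_cartesian(r_, lat, lon)
print(np.allclose([x.value, y.value, z.value], [0.0, 1.0, 0.0], atol=1e-12))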
Determines the location of the sun at a given time (or times, if the input is an array `~astropy.time.Time` object), in geocentric coordinates. Parameters ---------- time : `~astropy.time.Time` The time(s) at which to compute the location of the sun. Returns ------- newsc : `~astropy.coordinates.SkyCoord` The location of the sun as a `~astropy.coordinates.SkyCoord` in the `~astropy.coordinates.GCRS` frame. Notes ----- The algorithm for determining the sun/earth relative position is based on the simplified version of VSOP2000 that is part of ERFA. Compared to JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps 250 km over the 1000-3000.
def get_sun(time): """ Determines the location of the sun at a given time (or times, if the input is an array `~astropy.time.Time` object), in geocentric coordinates. Parameters ---------- time : `~astropy.time.Time` The time(s) at which to compute the location of the sun. Returns ------- newsc : `~astropy.coordinates.SkyCoord` The location of the sun as a `~astropy.coordinates.SkyCoord` in the `~astropy.coordinates.GCRS` frame. Notes ----- The algorithm for determining the sun/earth relative position is based on the simplified version of VSOP2000 that is part of ERFA. Compared to JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps 250 km over the 1000-3000. """ earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, "tdb")) # We have to manually do aberration because we're outputting directly into # GCRS earth_p = earth_pv_helio["p"] earth_v = earth_pv_bary["v"] # convert barycentric velocity to units of c, but keep as array for passing in to erfa earth_v /= c.to_value(u.au / u.d) dsun = np.sqrt(np.sum(earth_p**2, axis=-1)) invlorentz = (1 - np.sum(earth_v**2, axis=-1)) ** 0.5 properdir = erfa.ab( earth_p / dsun.reshape(dsun.shape + (1,)), -earth_v, dsun, invlorentz ) cartrep = CartesianRepresentation( x=-dsun * properdir[..., 0] * u.AU, y=-dsun * properdir[..., 1] * u.AU, z=-dsun * properdir[..., 2] * u.AU, ) return SkyCoord(cartrep, frame=GCRS(obstime=time))
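For reference, ``get_sun`` is exposed from the public ``astropy.coordinates`` namespace; a minimal usage example:

from astropy.time import Time
from astropy.coordinates import get_sun

sun = get_sun(Time("2020-06-21 12:00"))      # SkyCoord of the Sun in the GCRS frame
print(sun.ra.deg, sun.dec.deg, sun.distance.au)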
Determines the constellation(s) a given coordinate object contains. Parameters ---------- coord : coordinate-like The object to determine the constellation of. short_name : bool If True, the returned names are the IAU-sanctioned abbreviated names. Otherwise, full names for the constellations are used. constellation_list : str The set of constellations to use. Currently only ``'iau'`` is supported, meaning the 88 "modern" constellations endorsed by the IAU. Returns ------- constellation : str or string array If ``coords`` contains a scalar coordinate, returns the name of the constellation. If it is an array coordinate object, it returns an array of names. Notes ----- To determine which constellation a point on the sky is in, this precesses to B1875, and then uses the Delporte boundaries of the 88 modern constellations, as tabulated by `Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_.
def get_constellation(coord, short_name=False, constellation_list="iau"): """ Determines the constellation(s) a given coordinate object contains. Parameters ---------- coord : coordinate-like The object to determine the constellation of. short_name : bool If True, the returned names are the IAU-sanctioned abbreviated names. Otherwise, full names for the constellations are used. constellation_list : str The set of constellations to use. Currently only ``'iau'`` is supported, meaning the 88 "modern" constellations endorsed by the IAU. Returns ------- constellation : str or string array If ``coords`` contains a scalar coordinate, returns the name of the constellation. If it is an array coordinate object, it returns an array of names. Notes ----- To determine which constellation a point on the sky is in, this precesses to B1875, and then uses the Delporte boundaries of the 88 modern constellations, as tabulated by `Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_. """ if constellation_list != "iau": raise ValueError("only 'iau' us currently supported for constellation_list") # read the data files and cache them if they haven't been already if not _constellation_data: cdata = data.get_pkg_data_contents("data/constellation_data_roman87.dat") ctable = ascii.read(cdata, names=["ral", "rau", "decl", "name"]) cnames = data.get_pkg_data_contents( "data/constellation_names.dat", encoding="UTF8" ) cnames_short_to_long = { l[:3]: l[4:] for l in cnames.split("\n") if not l.startswith("#") } cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable["name"]]) _constellation_data["ctable"] = ctable _constellation_data["cnames_long"] = cnames_long else: ctable = _constellation_data["ctable"] cnames_long = _constellation_data["cnames_long"] isscalar = coord.isscalar # if it is geocentric, we reproduce the frame but with the 1875 equinox, # which is where the constellations are defined # this yields a "dubious year" warning because ERFA considers the year 1875 # "dubious", probably because UTC isn't well-defined then and precession # models aren't precisely calibrated back to then. But it's plenty # sufficient for constellations with warnings.catch_warnings(): warnings.simplefilter("ignore", erfa.ErfaWarning) constel_coord = coord.transform_to(PrecessedGeocentric(equinox="B1875")) if isscalar: rah = constel_coord.ra.ravel().hour decd = constel_coord.dec.ravel().deg else: rah = constel_coord.ra.hour decd = constel_coord.dec.deg constellidx = -np.ones(len(rah), dtype=int) notided = constellidx == -1 # should be all for i, row in enumerate(ctable): msk = (row["ral"] < rah) & (rah < row["rau"]) & (decd > row["decl"]) constellidx[notided & msk] = i notided = constellidx == -1 if np.sum(notided) == 0: break else: raise ValueError( f"Could not find constellation for coordinates {constel_coord[notided]}" ) if short_name: names = ctable["name"][constellidx] else: names = cnames_long[constellidx] if isscalar: return names[0] else: return names
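``get_constellation`` is likewise part of the public ``astropy.coordinates`` API; for example, a coordinate near Betelgeuse resolves to Orion:

from astropy.coordinates import SkyCoord, get_constellation

betelgeuse = SkyCoord("05h55m10s", "+07d24m25s")
print(get_constellation(betelgeuse))                    # 'Orion'
print(get_constellation(betelgeuse, short_name=True))   # 'Ori'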
Helper function for the concatenate function below. Gets and concatenates all of the individual components for an iterable of representations or differentials.
def _concatenate_components(reps_difs, names):
    """Helper function for the concatenate function below. Gets and
    concatenates all of the individual components for an iterable of
    representations or differentials.
    """
    return [
        np.concatenate(np.atleast_1d(*[getattr(x, name) for x in reps_difs]))
        for name in names
    ]
Combine multiple representation objects into a single instance by concatenating the data in each component. Currently, all of the input representations have to be the same type. This properly handles differential or velocity data, but all input objects must have the same differential object type as well. Parameters ---------- reps : sequence of `~astropy.coordinates.BaseRepresentation` The objects to concatenate Returns ------- rep : `~astropy.coordinates.BaseRepresentation` subclass instance A single representation object with its data set to the concatenation of all the elements of the input sequence of representations.
def concatenate_representations(reps): """ Combine multiple representation objects into a single instance by concatenating the data in each component. Currently, all of the input representations have to be the same type. This properly handles differential or velocity data, but all input objects must have the same differential object type as well. Parameters ---------- reps : sequence of `~astropy.coordinates.BaseRepresentation` The objects to concatenate Returns ------- rep : `~astropy.coordinates.BaseRepresentation` subclass instance A single representation object with its data set to the concatenation of all the elements of the input sequence of representations. """ if not isinstance(reps, (Sequence, np.ndarray)): raise TypeError("Input must be a list or iterable of representation objects.") # First, validate that the representations are the same, and # concatenate all of the positional data: rep_type = type(reps[0]) if any(type(r) != rep_type for r in reps): raise TypeError("Input representations must all have the same type.") # Construct the new representation with the concatenated data from the # representations passed in values = _concatenate_components(reps, rep_type.attr_classes.keys()) new_rep = rep_type(*values) has_diff = any("s" in rep.differentials for rep in reps) if has_diff and any("s" not in rep.differentials for rep in reps): raise ValueError( "Input representations must either all contain " "differentials, or not contain differentials." ) if has_diff: dif_type = type(reps[0].differentials["s"]) if any( "s" not in r.differentials or type(r.differentials["s"]) != dif_type for r in reps ): raise TypeError( "All input representations must have the same differential type." ) values = _concatenate_components( [r.differentials["s"] for r in reps], dif_type.attr_classes.keys() ) new_dif = dif_type(*values) new_rep = new_rep.with_differentials({"s": new_dif}) return new_rep
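A minimal usage sketch for ``concatenate_representations`` (it is importable from ``astropy.coordinates`` in recent astropy releases; treat the import path as an assumption otherwise):

import astropy.units as u
from astropy.coordinates import CartesianRepresentation, concatenate_representations

rep1 = CartesianRepresentation([1, 2] * u.kpc, [3, 4] * u.kpc, [5, 6] * u.kpc)
rep2 = CartesianRepresentation([7] * u.kpc, [8] * u.kpc, [9] * u.kpc)

combined = concatenate_representations([rep1, rep2])   # length-3 CartesianRepresentation
print(combined.shape)                                   # (3,)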
Combine multiple coordinate objects into a single `~astropy.coordinates.SkyCoord`. "Coordinate objects" here mean frame objects with data, `~astropy.coordinates.SkyCoord`, or representation objects. Currently, they must all be in the same frame, but in a future version this may be relaxed to allow inhomogeneous sequences of objects. Parameters ---------- coords : sequence of coordinate-like The objects to concatenate Returns ------- cskycoord : SkyCoord A single sky coordinate with its data set to the concatenation of all the elements in ``coords``
def concatenate(coords): """ Combine multiple coordinate objects into a single `~astropy.coordinates.SkyCoord`. "Coordinate objects" here mean frame objects with data, `~astropy.coordinates.SkyCoord`, or representation objects. Currently, they must all be in the same frame, but in a future version this may be relaxed to allow inhomogeneous sequences of objects. Parameters ---------- coords : sequence of coordinate-like The objects to concatenate Returns ------- cskycoord : SkyCoord A single sky coordinate with its data set to the concatenation of all the elements in ``coords`` """ if getattr(coords, "isscalar", False) or not isiterable(coords): raise TypeError("The argument to concatenate must be iterable") scs = [SkyCoord(coord, copy=False) for coord in coords] # Check that all frames are equivalent for sc in scs[1:]: if not sc.is_equivalent_frame(scs[0]): raise ValueError( f"All inputs must have equivalent frames: {sc} != {scs[0]}" ) # TODO: this can be changed to SkyCoord.from_representation() for a speed # boost when we switch to using classmethods return SkyCoord( concatenate_representations([c.data for c in coords]), frame=scs[0].frame )
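``concatenate`` works at the ``SkyCoord`` level and requires all inputs to be in equivalent frames; a small example:

import astropy.units as u
from astropy.coordinates import SkyCoord, concatenate

c1 = SkyCoord(1 * u.deg, 2 * u.deg)               # scalar ICRS coordinate
c2 = SkyCoord([3, 4] * u.deg, [5, 6] * u.deg)     # array ICRS coordinate

combined = concatenate([c1, c2])                   # length-3 SkyCoord
print(len(combined))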
Regex match for coordinates in name.
def search(name, raise_=False):
    """Regex match for coordinates in name."""
    # extract the coordinate data from name
    match = JPARSER.search(name)
    if match is None and raise_:
        raise ValueError("No coordinate match found!")
    return match
get RA in hourangle and DEC in degrees by parsing name.
def to_ra_dec_angles(name):
    """get RA in hourangle and DEC in degrees by parsing name."""
    groups = search(name, True).groups()
    prefix, hms, dms = np.split(groups, [1, 6])
    ra = (_sexagesimal(hms) / (1, 60, 60 * 60) * u.hourangle).sum()
    dec = (_sexagesimal(dms) * (u.deg, u.arcmin, u.arcsec)).sum()
    return ra, dec
Convert `name` to a `SkyCoord` object.
def to_skycoord(name, frame="icrs"):
    """Convert `name` to a `SkyCoord` object."""
    return SkyCoord(*to_ra_dec_angles(name), frame=frame)
Produce a shortened version of the full object name. The shortened name is built from the prefix (usually the survey name) and RA (hour, minute), DEC (deg, arcmin) parts. e.g.: '2MASS J06495091-0737408' --> '2MASS J0649-0737' Parameters ---------- name : str Full object name with J-coords embedded. Returns ------- shortName: str
def shorten(name):
    """Produce a shortened version of the full object name.

    The shortened name is built from the prefix (usually the survey name) and
    RA (hour, minute), DEC (deg, arcmin) parts.
    e.g.: '2MASS J06495091-0737408' --> '2MASS J0649-0737'

    Parameters
    ----------
    name : str
        Full object name with J-coords embedded.

    Returns
    -------
    shortName: str
    """
    match = search(name)
    return "".join(match.group(1, 3, 4, 7, 8, 9))
Finds the nearest 3-dimensional matches of a coordinate or coordinates in a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord``, as in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail.
def match_coordinates_3d( matchcoord, catalogcoord, nthneighbor=1, storekdtree="kdtree_3d" ): """ Finds the nearest 3-dimensional matches of a coordinate or coordinates in a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord``, as in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. """ if catalogcoord.isscalar or len(catalogcoord) < 1: raise ValueError( "The catalog for coordinate matching cannot be a scalar or length-0." ) kdt = _get_cartesian_kdtree(catalogcoord, storekdtree) # make sure coordinate systems match if isinstance(matchcoord, SkyCoord): matchcoord = matchcoord.transform_to(catalogcoord, merge_attributes=False) else: matchcoord = matchcoord.transform_to(catalogcoord) # make sure units match catunit = catalogcoord.cartesian.x.unit matchxyz = matchcoord.cartesian.xyz.to(catunit) matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3)) # Querying NaN returns garbage if np.isnan(matchflatxyz.value).any(): raise ValueError("Matching coordinates cannot contain NaN entries.") dist, idx = kdt.query(matchflatxyz.T, nthneighbor) if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise dist = dist[:, -1] idx = idx[:, -1] sep2d = catalogcoord[idx].separation(matchcoord) return ( idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit, )
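In practice this function is usually reached through ``SkyCoord.match_to_catalog_3d``, which wraps it; a minimal example:

import astropy.units as u
from astropy.coordinates import SkyCoord

catalog = SkyCoord(ra=[10, 20, 30] * u.deg, dec=[0, 5, 10] * u.deg,
                   distance=[1, 2, 3] * u.kpc)
targets = SkyCoord(ra=[10.1, 29.9] * u.deg, dec=[0.1, 9.9] * u.deg,
                   distance=[1, 3] * u.kpc)

idx, sep2d, dist3d = targets.match_to_catalog_3d(catalog)
print(idx)        # indices into catalog: [0 2]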
Finds the nearest on-sky matches of a coordinate or coordinates in a set of catalog coordinates. This finds the on-sky closest neighbor, which is only different from the 3-dimensional match if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord`` in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. If either ``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D distance on the unit sphere, rather than a true distance. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail.
def match_coordinates_sky( matchcoord, catalogcoord, nthneighbor=1, storekdtree="kdtree_sky" ): """ Finds the nearest on-sky matches of a coordinate or coordinates in a set of catalog coordinates. This finds the on-sky closest neighbor, which is only different from the 3-dimensional match if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord`` in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. If either ``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D distance on the unit sphere, rather than a true distance. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. """ if catalogcoord.isscalar or len(catalogcoord) < 1: raise ValueError( "The catalog for coordinate matching cannot be a scalar or length-0." ) # send to catalog frame if isinstance(matchcoord, SkyCoord): newmatch = matchcoord.transform_to(catalogcoord, merge_attributes=False) else: newmatch = matchcoord.transform_to(catalogcoord) # strip out distance info match_urepr = newmatch.data.represent_as(UnitSphericalRepresentation) newmatch_u = newmatch.realize_frame(match_urepr) cat_urepr = catalogcoord.data.represent_as(UnitSphericalRepresentation) newcat_u = catalogcoord.realize_frame(cat_urepr) # Check for a stored KD-tree on the passed-in coordinate. Normally it will # have a distinct name from the "3D" one, so it's safe to use even though # it's based on UnitSphericalRepresentation. 
storekdtree = catalogcoord.cache.get(storekdtree, storekdtree) idx, sep2d, sep3d = match_coordinates_3d( newmatch_u, newcat_u, nthneighbor, storekdtree ) # sep3d is *wrong* above, because the distance information was removed, # unless one of the catalogs doesn't have a real distance if not ( isinstance(catalogcoord.data, UnitSphericalRepresentation) or isinstance(newmatch.data, UnitSphericalRepresentation) ): sep3d = catalogcoord[idx].separation_3d(newmatch) # update the kdtree on the actual passed-in coordinate if isinstance(storekdtree, str): catalogcoord.cache[storekdtree] = newcat_u.cache[storekdtree] elif storekdtree is True: # the old backwards-compatible name catalogcoord.cache["kdtree"] = newcat_u.cache["kdtree"] return idx, sep2d, sep3d
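The on-sky variant is exposed the same way via ``SkyCoord.match_to_catalog_sky``; distances, if present on the inputs, only affect the returned ``dist3d``:

import astropy.units as u
from astropy.coordinates import SkyCoord

catalog = SkyCoord(ra=[10, 20, 30] * u.deg, dec=[0, 5, 10] * u.deg)
targets = SkyCoord(ra=[10.1, 29.9] * u.deg, dec=[0.1, 9.9] * u.deg)

idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)
print(idx, sep2d.to(u.arcmin))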
Searches for pairs of points that are at least as close as a specified distance in 3D space. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation_3d`` methods. Parameters ---------- coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. distlimit : `~astropy.units.Quantity` ['length'] The physical radius to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search with the name ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : int array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : int array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``. The unit is that of ``coords1``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. If you are using this function to search in a catalog for matches around specific points, the convention is for ``coords2`` to be the catalog, and ``coords1`` are the points to search around. While these operations are mathematically the same if ``coords1`` and ``coords2`` are flipped, some of the optimizations may work better if this convention is obeyed. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release.
def search_around_3d(coords1, coords2, distlimit, storekdtree="kdtree_3d"): """ Searches for pairs of points that are at least as close as a specified distance in 3D space. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation_3d`` methods. Parameters ---------- coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. distlimit : `~astropy.units.Quantity` ['length'] The physical radius to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search with the name ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : int array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : int array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``. The unit is that of ``coords1``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. If you are using this function to search in a catalog for matches around specific points, the convention is for ``coords2`` to be the catalog, and ``coords1`` are the points to search around. While these operations are mathematically the same if ``coords1`` and ``coords2`` are flipped, some of the optimizations may work better if this convention is obeyed. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. """ if not distlimit.isscalar: raise ValueError("distlimit must be a scalar in search_around_3d") if coords1.isscalar or coords2.isscalar: raise ValueError( "One of the inputs to search_around_3d is a scalar. search_around_3d is" " intended for use with array coordinates, not scalars. Instead, use" " ``coord1.separation_3d(coord2) < distlimit`` to find the coordinates near" " a scalar coordinate." ) kdt2 = _get_cartesian_kdtree(coords2, storekdtree) cunit = coords2.cartesian.x.unit # we convert coord1 to match coord2's frame. We do it this way # so that if the conversion does happen, the KD tree of coord2 at least gets # saved. (by convention, coord2 is the "catalog" if that makes sense) coords1 = coords1.transform_to(coords2) kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit) idxs1 = [] idxs2 = [] for i, matches in enumerate(kdt1.query_ball_tree(kdt2, distlimit.to_value(cunit))): idxs1.extend(len(matches) * [i]) idxs2.extend(matches) return ( np.array(idxs1, dtype=int), np.array(idxs2, dtype=int), coords1[idxs1].separation(coords2[idxs2]), coords1[idxs1].separation_3d(coords2[idxs2]), )
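``search_around_3d`` is importable directly from ``astropy.coordinates``; a small example in which only one pair falls inside the search radius:

import astropy.units as u
from astropy.coordinates import SkyCoord, search_around_3d

c1 = SkyCoord(ra=[10, 20] * u.deg, dec=[0, 5] * u.deg, distance=[1, 2] * u.kpc)
c2 = SkyCoord(ra=[10.01, 50] * u.deg, dec=[0.01, 5] * u.deg, distance=[1, 2] * u.kpc)

idx1, idx2, sep2d, dist3d = search_around_3d(c1, c2, 10 * u.pc)
print(idx1, idx2)   # [0] [0]: only the first pair is within 10 pc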
Searches for pairs of points that have an angular separation at least as close as a specified angle. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation`` methods. Parameters ---------- coords1 : coordinate-like The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : coordinate-like The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. seplimit : `~astropy.units.Quantity` ['angle'] The on-sky separation to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search with the name ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : int array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : int array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``; the unit is that of ``coords1``. If either ``coords1`` or ``coords2`` don't have a distance, this is the 3D distance on the unit sphere, rather than a physical distance. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release.
def search_around_sky(coords1, coords2, seplimit, storekdtree="kdtree_sky"): """ Searches for pairs of points that have an angular separation at least as close as a specified angle. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation`` methods. Parameters ---------- coords1 : coordinate-like The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : coordinate-like The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. seplimit : `~astropy.units.Quantity` ['angle'] The on-sky separation to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search with the name ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : int array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : int array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``; the unit is that of ``coords1``. If either ``coords1`` or ``coords2`` don't have a distance, this is the 3D distance on the unit sphere, rather than a physical distance. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. """ if not seplimit.isscalar: raise ValueError("seplimit must be a scalar in search_around_sky") if coords1.isscalar or coords2.isscalar: raise ValueError( "One of the inputs to search_around_sky is a scalar. search_around_sky is" " intended for use with array coordinates, not scalars. Instead, use" " ``coord1.separation(coord2) < seplimit`` to find the coordinates near a" " scalar coordinate." ) # we convert coord1 to match coord2's frame. We do it this way # so that if the conversion does happen, the KD tree of coord2 at least gets # saved. 
(by convention, coord2 is the "catalog" if that makes sense) coords1 = coords1.transform_to(coords2) # strip out distance info urepr1 = coords1.data.represent_as(UnitSphericalRepresentation) kdt1 = _get_cartesian_kdtree(coords1.realize_frame(urepr1), storekdtree) if storekdtree and coords2.cache.get(storekdtree): # just use the stored KD-Tree kdt2 = coords2.cache[storekdtree] else: # strip out distance info urepr2 = coords2.data.represent_as(UnitSphericalRepresentation) kdt2 = _get_cartesian_kdtree(coords2.realize_frame(urepr2), storekdtree) if storekdtree: coords2.cache["kdtree" if storekdtree is True else storekdtree] = kdt2 # this is the *cartesian* 3D distance that corresponds to the given angle r = (2 * np.sin(Angle(0.5 * seplimit))).value idxs1 = [] idxs2 = [] for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)): idxs1.extend(len(matches) * [i]) idxs2.extend(matches) d2ds = coords1[idxs1].separation(coords2[idxs2]) try: d3ds = coords1[idxs1].separation_3d(coords2[idxs2]) except ValueError: # they don't have distances, so we just fall back on the cartesian # distance, computed from d2ds d3ds = 2 * np.sin(0.5 * d2ds) return np.array(idxs1, dtype=int), np.array(idxs2, dtype=int), d2ds, d3ds
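A minimal usage sketch of the on-sky variant (requires SciPy); values are illustrative. Because neither input has distances here, ``dist3d`` is the chord length on the unit sphere rather than a physical distance.

import astropy.units as u
from astropy.coordinates import SkyCoord, search_around_sky

pts = SkyCoord(ra=[10.0, 20.0] * u.deg, dec=[0.0, -5.0] * u.deg)
catalog = SkyCoord(ra=[10.0003, 30.0] * u.deg, dec=[0.0002, 5.0] * u.deg)

# All pairs separated by less than 2 arcsec on the sky.
idx1, idx2, sep2d, dist3d = search_around_sky(pts, catalog, 2 * u.arcsec)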
This is a utility function to retrieve (and build/cache, if necessary) a 3D cartesian KD-Tree from various sorts of astropy coordinate objects. Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinates to build the KD-Tree for. attrname_or_kdt : bool or str or KDTree If a string, will store the KD-Tree used for the computation in the ``coord``, in ``coord.cache`` with the provided name. If given as a KD-Tree, it will just be used directly. forceunit : unit or None If a unit, the cartesian coordinates will convert to that unit before being put in the KD-Tree. If None, whatever unit it's already in will be used Returns ------- kdt : `~scipy.spatial.KDTree` The KD-Tree representing the 3D cartesian representation of the input coordinates.
def _get_cartesian_kdtree(coord, attrname_or_kdt="kdtree", forceunit=None): """ This is a utility function to retrieve (and build/cache, if necessary) a 3D cartesian KD-Tree from various sorts of astropy coordinate objects. Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinates to build the KD-Tree for. attrname_or_kdt : bool or str or KDTree If a string, will store the KD-Tree used for the computation in the ``coord``, in ``coord.cache`` with the provided name. If given as a KD-Tree, it will just be used directly. forceunit : unit or None If a unit, the cartesian coordinates will convert to that unit before being put in the KD-Tree. If None, whatever unit it's already in will be used Returns ------- kdt : `~scipy.spatial.KDTree` The KD-Tree representing the 3D cartesian representation of the input coordinates. """ from scipy.spatial import KDTree if attrname_or_kdt is True: # backwards compatibility for pre v0.4 attrname_or_kdt = "kdtree" # figure out where any cached KDTree might be if isinstance(attrname_or_kdt, str): kdt = coord.cache.get(attrname_or_kdt, None) if kdt is not None and not isinstance(kdt, KDTree): raise TypeError( f'The `attrname_or_kdt` "{attrname_or_kdt}" is not a scipy KD tree!' ) elif isinstance(attrname_or_kdt, KDTree): kdt = attrname_or_kdt attrname_or_kdt = None elif not attrname_or_kdt: kdt = None else: raise TypeError( "Invalid `attrname_or_kdt` argument for KD-Tree:" + str(attrname_or_kdt) ) if kdt is None: # need to build the cartesian KD-tree for the catalog if forceunit is None: cartxyz = coord.cartesian.xyz else: cartxyz = coord.cartesian.xyz.to(forceunit) flatxyz = cartxyz.reshape((3, np.prod(cartxyz.shape) // 3)) # There should be no NaNs in the kdtree data. if np.isnan(flatxyz.value).any(): raise ValueError("Catalog coordinates cannot contain NaN entries.") # Not obvious if compact_nodes=False, balanced_tree=False is still needed but # we stay backwards-compatible with previous versions of `astropy` for now. kdt = KDTree(flatxyz.value.T, compact_nodes=False, balanced_tree=False) if attrname_or_kdt: # cache the kdtree in `coord` coord.cache[attrname_or_kdt] = kdt return kdt
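A small sketch of the caching behaviour. ``_get_cartesian_kdtree`` is a private helper, so the import path assumed below (``astropy.coordinates.matching``) may change between versions.

import astropy.units as u
from astropy.coordinates import ICRS
from astropy.coordinates.matching import _get_cartesian_kdtree  # private helper

catalog = ICRS(ra=[10.0, 20.0] * u.deg, dec=[0.0, 5.0] * u.deg,
               distance=[1.0, 2.0] * u.pc)

kdt = _get_cartesian_kdtree(catalog, attrname_or_kdt="kdtree")
# The tree is cached on the coordinate object and reused on later calls.
assert catalog.cache["kdtree"] is kdt
assert _get_cartesian_kdtree(catalog, attrname_or_kdt="kdtree") is kdt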
Matrix multiply all arguments together.

Arguments should have dimension 2 or larger. Larger dimensional objects
are interpreted as stacks of matrices residing in the last two dimensions.

This function mostly exists for readability: using `~numpy.matmul`
directly, one would have ``matmul(matmul(m1, m2), m3)``, etc.  Since the
``@`` operator (Python >= 3.5) provides the same behaviour, ``m1 @ m2 @ m3``
is an equivalent, and usually preferable, spelling.  Note that
`~numpy.matrix` is not a good alternative: although it allows
``m1 * m2 * m3``, it cannot handle stacks of matrices and its use is
discouraged by NumPy.
def matrix_product(*matrices):
    """Matrix multiply all arguments together.

    Arguments should have dimension 2 or larger. Larger dimensional objects
    are interpreted as stacks of matrices residing in the last two dimensions.

    This function mostly exists for readability: using `~numpy.matmul`
    directly, one would have ``matmul(matmul(m1, m2), m3)``, etc.  Since the
    ``@`` operator (Python >= 3.5) provides the same behaviour,
    ``m1 @ m2 @ m3`` is an equivalent, and usually preferable, spelling.
    Note that `~numpy.matrix` is not a good alternative: although it allows
    ``m1 * m2 * m3``, it cannot handle stacks of matrices and its use is
    discouraged by NumPy.
    """
    return reduce(np.matmul, matrices)
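A quick check, with ``matrix_product`` as defined above in scope, that the chained product matches the ``@`` operator:

import numpy as np

m1 = np.diag([1.0, 2.0, 3.0])
m2 = np.eye(3)
m3 = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])

# matrix_product(m1, m2, m3) is reduce(np.matmul, ...), i.e. m1 @ m2 @ m3.
assert np.allclose(matrix_product(m1, m2, m3), m1 @ m2 @ m3)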
Transpose a matrix or stack of matrices by swapping the last two axes. This function mostly exists for readability; seeing ``.swapaxes(-2, -1)`` it is not that obvious that one does a transpose. Note that one cannot use `~numpy.ndarray.T`, as this transposes all axes and thus does not work for stacks of matrices. We also avoid ``np.matrix_transpose`` (new in numpy 2.0), since it is slower, as it first ensures the input is an array, while we ducktype, assuming the input has a ``.swapaxes`` method.
def matrix_transpose(matrix): """Transpose a matrix or stack of matrices by swapping the last two axes. This function mostly exists for readability; seeing ``.swapaxes(-2, -1)`` it is not that obvious that one does a transpose. Note that one cannot use `~numpy.ndarray.T`, as this transposes all axes and thus does not work for stacks of matrices. We also avoid ``np.matrix_transpose`` (new in numpy 2.0), since it is slower, as it first ensures the input is an array, while we ducktype, assuming the input has a ``.swapaxes`` method. """ return matrix.swapaxes(-2, -1)
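A short illustration with a stack of matrices (assuming the usual astropy import path):

import numpy as np
from astropy.coordinates.matrix_utilities import matrix_transpose

stack = np.arange(2 * 3 * 3, dtype=float).reshape(2, 3, 3)  # two stacked 3x3 matrices
ts = matrix_transpose(stack)

# Only the last two axes are swapped, so each matrix in the stack is transposed.
assert ts.shape == (2, 3, 3)
assert np.array_equal(ts[1], stack[1].T)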
Generate matrices for rotation by some angle around some axis.

Parameters
----------
angle : angle-like
    The amount of rotation the matrices should represent.  Can be an array.
axis : str or array-like
    Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to
    rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is
    counterclockwise looking down the + axis (i.e. positive rotations obey
    the left-hand rule).  If given as an array, the last dimension should be
    3; it will be broadcast against ``angle``.
unit : unit-like, optional
    If ``angle`` does not have associated units, they are in this unit.  If
    neither are provided, it is assumed to be degrees.

Returns
-------
rmat : ndarray
    A 3 x 3 rotation matrix (or a stack of such matrices if ``angle`` or
    ``axis`` is an array).
def rotation_matrix(angle, axis="z", unit=None): """ Generate matrices for rotation by some angle around some axis. Parameters ---------- angle : angle-like The amount of rotation the matrices should represent. Can be an array. axis : str or array-like Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is counterclockwise looking down the + axis (e.g. positive rotations obey left-hand-rule). If given as an array, the last dimension should be 3; it will be broadcast against ``angle``. unit : unit-like, optional If ``angle`` does not have associated units, they are in this unit. If neither are provided, it is assumed to be degrees. Returns ------- rmat : `numpy.matrix` A unitary rotation matrix. """ if isinstance(angle, u.Quantity): angle = angle.to_value(u.radian) else: if unit is None: angle = np.deg2rad(angle) else: angle = u.Unit(unit).to(u.rad, angle) s = np.sin(angle) c = np.cos(angle) # use optimized implementations for x/y/z try: i = "xyz".index(axis) except TypeError: axis = np.asarray(axis) axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True)) R = ( axis[..., np.newaxis] * axis[..., np.newaxis, :] * (1.0 - c)[..., np.newaxis, np.newaxis] ) for i in range(3): R[..., i, i] += c a1 = (i + 1) % 3 a2 = (i + 2) % 3 R[..., a1, a2] += axis[..., i] * s R[..., a2, a1] -= axis[..., i] * s else: a1 = (i + 1) % 3 a2 = (i + 2) % 3 R = np.zeros(getattr(angle, "shape", ()) + (3, 3)) R[..., i, i] = 1.0 R[..., a1, a1] = c R[..., a1, a2] = s R[..., a2, a1] = -s R[..., a2, a2] = c return R
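A minimal sketch of the sign convention described above (frame rotation, so the left-hand rule for vectors):

import numpy as np
import astropy.units as u
from astropy.coordinates.matrix_utilities import rotation_matrix

R = rotation_matrix(90 * u.deg, axis="z")

# With this convention, a +90 deg rotation about z sends x-hat to -y-hat.
print(R @ np.array([1.0, 0.0, 0.0]))  # approximately [0, -1, 0]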
Angle of rotation and rotation axis for a given rotation matrix. Parameters ---------- matrix : array-like A 3 x 3 unitary rotation matrix (or stack of matrices). Returns ------- angle : `~astropy.coordinates.Angle` The angle of rotation. axis : array The (normalized) axis of rotation (with last dimension 3).
def angle_axis(matrix): """ Angle of rotation and rotation axis for a given rotation matrix. Parameters ---------- matrix : array-like A 3 x 3 unitary rotation matrix (or stack of matrices). Returns ------- angle : `~astropy.coordinates.Angle` The angle of rotation. axis : array The (normalized) axis of rotation (with last dimension 3). """ m = np.asanyarray(matrix) if m.shape[-2:] != (3, 3): raise ValueError("matrix is not 3x3") axis = np.zeros(m.shape[:-1]) axis[..., 0] = m[..., 2, 1] - m[..., 1, 2] axis[..., 1] = m[..., 0, 2] - m[..., 2, 0] axis[..., 2] = m[..., 1, 0] - m[..., 0, 1] r = np.sqrt((axis * axis).sum(-1, keepdims=True)) angle = np.arctan2(r[..., 0], m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.0) return Angle(angle, u.radian), -axis / r
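A round-trip sketch: build a rotation with ``rotation_matrix`` and recover its angle and axis.

import astropy.units as u
from astropy.coordinates.matrix_utilities import angle_axis, rotation_matrix

R = rotation_matrix(30 * u.deg, axis="x")
angle, axis = angle_axis(R)
# angle is ~30 deg and axis is ~[1, 0, 0], recovering the inputs above.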
Check whether a matrix is in the length-preserving group O(3). Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose`. atol : float, optional The allowed absolute difference. If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating. .. versionadded:: 5.3 Returns ------- is_o3 : bool or array of bool If the matrix has more than two axes, the O(3) check is performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. Notes ----- The orthogonal group O(3) preserves lengths, but is not guaranteed to keep orientations. Rotations and reflections are in this group. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
def is_O3(matrix, atol=None): """Check whether a matrix is in the length-preserving group O(3). Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose`. atol : float, optional The allowed absolute difference. If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating. .. versionadded:: 5.3 Returns ------- is_o3 : bool or array of bool If the matrix has more than two axes, the O(3) check is performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. Notes ----- The orthogonal group O(3) preserves lengths, but is not guaranteed to keep orientations. Rotations and reflections are in this group. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group """ # matrix is in O(3) (rotations, proper and improper). I = np.identity(matrix.shape[-1]) if atol is None: if np.issubdtype(matrix.dtype, np.floating): atol = np.finfo(matrix.dtype).eps * 5 else: atol = 1e-15 is_o3 = np.all( np.isclose(matrix @ matrix.swapaxes(-2, -1), I, atol=atol), axis=(-2, -1) ) return is_o3
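A small illustration: both proper rotations and reflections preserve lengths, so both pass the O(3) check.

import numpy as np
from astropy.coordinates.matrix_utilities import is_O3, rotation_matrix

R = rotation_matrix(45, "z")             # proper rotation, det = +1
reflection = np.diag([1.0, 1.0, -1.0])   # improper, det = -1

assert is_O3(R) and is_O3(reflection)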
Check whether a matrix is a rotation, proper or improper.

Parameters
----------
matrix : (..., N, N) array-like
    Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
    when using `~numpy.isclose` and `~numpy.linalg.det`.
allow_improper : bool, optional
    Whether to restrict the check to SO(3), the group of proper rotations,
    or also allow improper rotations (with determinant -1).
    The default (False) is only SO(3).
atol : float, optional
    The allowed absolute difference.
    If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if
    floating.

    .. versionadded:: 5.3

Returns
-------
isrot : bool or array of bool
    If the matrix has more than two axes, the checks are performed on
    slices along the last two axes -- (M, N, N) => (M, ) bool array.

See Also
--------
astropy.coordinates.matrix_utilities.is_O3 :
    For the less restrictive check that a matrix is in the group O(3).

Notes
-----
The group SO(3) is the rotation group.  It is O(3), with determinant 1.
Rotations with determinant -1 are improper rotations, combining both a
rotation and a reflection.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
def is_rotation(matrix, allow_improper=False, atol=None): """Check whether a matrix is a rotation, proper or improper. Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose` and `~numpy.linalg.det`. allow_improper : bool, optional Whether to restrict check to the SO(3), the group of proper rotations, or also allow improper rotations (with determinant -1). The default (False) is only SO(3). atol : float, optional The allowed absolute difference. If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating. .. versionadded:: 5.3 Returns ------- isrot : bool or array of bool If the matrix has more than two axes, the checks are performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. See Also -------- astopy.coordinates.matrix_utilities.is_O3 : For the less restrictive check that a matrix is in the group O(3). Notes ----- The group SO(3) is the rotation group. It is O(3), with determinant 1. Rotations with determinant -1 are improper rotations, combining both a rotation and a reflection. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group """ if atol is None: if np.issubdtype(matrix.dtype, np.floating): atol = np.finfo(matrix.dtype).eps * 5 else: atol = 1e-15 # matrix is in O(3). is_o3 = is_O3(matrix, atol=atol) # determinant checks for rotation (proper and improper) if allow_improper: # determinant can be +/- 1 is_det1 = np.isclose(np.abs(np.linalg.det(matrix)), 1.0, atol=atol) else: # restrict to SO(3) is_det1 = np.isclose(np.linalg.det(matrix), 1.0, atol=atol) return is_o3 & is_det1
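Continuing the example above: the determinant check distinguishes proper from improper rotations.

import numpy as np
from astropy.coordinates.matrix_utilities import is_rotation, rotation_matrix

R = rotation_matrix(45, "z")
reflection = np.diag([1.0, 1.0, -1.0])

assert is_rotation(R)                                # det = +1: in SO(3)
assert not is_rotation(reflection)                   # det = -1: excluded by default
assert is_rotation(reflection, allow_improper=True)  # accepted when improper is allowed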
Given a string response from SESAME, parse out the coordinates by looking for a line starting with a J, meaning ICRS J2000 coordinates. Parameters ---------- resp_data : str The string HTTP response from SESAME. Returns ------- ra : str The string Right Ascension parsed from the HTTP response. dec : str The string Declination parsed from the HTTP response.
def _parse_response(resp_data): """ Given a string response from SESAME, parse out the coordinates by looking for a line starting with a J, meaning ICRS J2000 coordinates. Parameters ---------- resp_data : str The string HTTP response from SESAME. Returns ------- ra : str The string Right Ascension parsed from the HTTP response. dec : str The string Declination parsed from the HTTP response. """ pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)") matched = pattr.search(resp_data) if matched is None: return None, None else: ra, dec = matched.groups() return ra, dec
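Illustrative only: the response string below is fabricated to mimic the "%J" line of a Sesame reply (it is not real service output), with ``_parse_response`` as defined above in scope.

fake_resp = "%J 083.82208 -05.39111 = 05 35 17.3 -05 23 28"  # fabricated example
ra, dec = _parse_response(fake_resp)
# ra == '083.82208', dec == '-05.39111'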
Retrieve an ICRS object by using `Sesame <https://cds.unistra.fr/cgi-bin/Sesame>`_
to retrieve coordinates for the specified name. By default, this will
search all available databases (SIMBAD, NED and VizieR) until a match is found.
If you would like to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier.  The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving up.

Parameters
----------
name : str
    The name of the object to get coordinates for, e.g. ``'M42'``.
parse : bool
    Whether to attempt extracting the coordinates from the name by parsing
    with a regex. For object catalog names that have J-coordinates embedded
    in their names, e.g. 'CRTS SSS100805 J194428-420209', this may be much
    faster than a Sesame query for the same object name. The coordinates
    extracted in this way may differ from the database coordinates by a few
    deci-arcseconds, so only use this option if you do not need
    sub-arcsecond accuracy for coordinates.
cache : bool, str, optional
    Determines whether to cache the results or not. Passed through to
    `~astropy.utils.data.download_file`, so pass "update" to update the
    cached value.

Returns
-------
coord : `~astropy.coordinates.SkyCoord` object
    The object's coordinates in the ICRS frame.
def get_icrs_coordinates(name, parse=False, cache=False): """ Retrieve an ICRS object by using `Sesame <https://cds.unistra.fr/cgi-bin/Sesame>`_ to retrieve coordinates for the specified name. By default, this will search all available databases (SIMBAD, NED and VizieR) until a match is found. If you would like to specify the database, use the science state ``astropy.coordinates.name_resolve.sesame_database``. You can also specify a list of servers to use for querying Sesame using the science state ``astropy.coordinates.name_resolve.sesame_url``. This will try each one in order until a valid response is returned. By default, this list includes the main Sesame host and a mirror at vizier. The configuration item `astropy.utils.data.Conf.remote_timeout` controls the number of seconds to wait for a response from the server before giving up. Parameters ---------- name : str The name of the object to get coordinates for, e.g. ``'M42'``. parse : bool Whether to attempt extracting the coordinates from the name by parsing with a regex. For objects catalog names that have J-coordinates embedded in their names eg: 'CRTS SSS100805 J194428-420209', this may be much faster than a sesame query for the same object name. The coordinates extracted in this way may differ from the database coordinates by a few deci-arcseconds, so only use this option if you do not need sub-arcsecond accuracy for coordinates. cache : bool, str, optional Determines whether to cache the results or not. Passed through to `~astropy.utils.data.download_file`, so pass "update" to update the cached value. Returns ------- coord : `astropy.coordinates.ICRS` object The object's coordinates in the ICRS frame. """ # if requested, first try extract coordinates embedded in the object name. # Do this first since it may be much faster than doing the sesame query if parse: from . import jparser if jparser.search(name): return jparser.to_skycoord(name) else: # if the parser failed, fall back to sesame query. pass # maybe emit a warning instead of silently falling back to sesame? database = sesame_database.get() # The web API just takes the first letter of the database name db = database.upper()[0] # the A option does not set a preferred order for the database if db == "A": # we look into SIMBAD, NED, and then VizieR. This is the default Sesame behavior. db = "SNV" # Make sure we don't have duplicates in the url list urls = [] domains = [] for url in sesame_url.get(): domain = urllib.parse.urlparse(url).netloc # Check for duplicates if domain not in domains: domains.append(domain) # Add the query to the end of the url, add to url list fmt_url = os.path.join(url, "{db}?{name}") fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db) urls.append(fmt_url) exceptions = [] for url in urls: try: resp_data = get_file_contents( download_file(url, cache=cache, show_progress=False) ) break except urllib.error.URLError as e: exceptions.append(e) continue except TimeoutError as e: # There are some cases where urllib2 does not catch socket.timeout # especially while receiving response data on an already previously # working request e.reason = ( "Request took longer than the allowed " f"{data.conf.remote_timeout:.1f} seconds" ) exceptions.append(e) continue # All Sesame URL's failed... else: messages = [f"{url}: {e.reason}" for url, e in zip(urls, exceptions)] raise NameResolveError( "All Sesame queries failed. Unable to retrieve coordinates. 
See errors per" f" URL below: \n {os.linesep.join(messages)}" ) ra, dec = _parse_response(resp_data) if ra is None or dec is None: if db == "SNV": err = f"Unable to find coordinates for name '{name}' using {url}" else: err = ( f"Unable to find coordinates for name '{name}' in database" f" {database} using {url}" ) raise NameResolveError(err) # Return SkyCoord object sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame="icrs") return sc
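In practice this is usually reached through ``SkyCoord.from_name``; a minimal sketch (the Sesame query needs network access, values illustrative):

from astropy.coordinates import SkyCoord

m42 = SkyCoord.from_name("M42")  # resolves the name via Sesame (network required)

# With parse=True, J-coordinates embedded in the name are used instead of a query.
crts = SkyCoord.from_name("CRTS SSS100805 J194428-420209", parse=True)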
Add a custom set of mappings from values to Stokes symbols.

This is intended to be used as a context manager: the custom mapping is
active inside the ``with`` block and the original mapping is restored on
exit.

Parameters
----------
mapping : dict
    A dictionary with custom mappings between values (integers) and
    `.StokesSymbol` classes.
replace : bool, optional
    Replace all existing mappings with this one instead of adding to them.
def custom_stokes_symbol_mapping(
    mapping: dict[int, StokesSymbol], replace: bool = False
) -> None:
    """
    Add a custom set of mappings from values to Stokes symbols.

    This is intended to be used as a context manager: the custom mapping is
    active inside the ``with`` block and the original mapping is restored on
    exit.

    Parameters
    ----------
    mapping : dict
        A dictionary with custom mappings between values (integers) and
        `.StokesSymbol` classes.
    replace : bool, optional
        Replace all existing mappings with this one instead of adding to them.
    """
    global STOKES_VALUE_SYMBOL_MAP

    original_mapping = STOKES_VALUE_SYMBOL_MAP.copy()
    if not replace:
        STOKES_VALUE_SYMBOL_MAP = {**original_mapping, **mapping}
    else:
        STOKES_VALUE_SYMBOL_MAP = mapping

    yield

    STOKES_VALUE_SYMBOL_MAP = original_mapping
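A minimal usage sketch, assuming the function is exposed as a context manager, that ``StokesSymbol`` takes a symbol string and a description, and that both are importable from ``astropy.coordinates.polarization``; the value 100 is arbitrary.

from astropy.coordinates.polarization import StokesSymbol, custom_stokes_symbol_mapping

with custom_stokes_symbol_mapping({100: StokesSymbol("XX", "custom correlation")}):
    ...  # code here sees the extra value <-> symbol mapping
# On exit, the original STOKES_VALUE_SYMBOL_MAP is restored.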
Load the observatory database from ``data/sites.json`` and parse it into a
`SiteRegistry`.
def get_builtin_sites():
    """
    Load the observatory database from ``data/sites.json`` and parse it into
    a `SiteRegistry`.
    """
    jsondb = json.loads(get_pkg_data_contents("data/sites.json"))
    return SiteRegistry.from_json(jsondb)
Load observatory database from data.astropy.org and parse into a SiteRegistry.
def get_downloaded_sites(jsonurl=None): """ Load observatory database from data.astropy.org and parse into a SiteRegistry. """ # we explicitly set the encoding because the default is to leave it set by # the users' locale, which may fail if it's not matched to the sites.json if jsonurl is None: content = get_pkg_data_contents("coordinates/sites.json", encoding="UTF-8") else: content = get_file_contents(jsonurl, encoding="UTF-8") jsondb = json.loads(content) return SiteRegistry.from_json(jsondb)
Get a frame class from the input `frame`, which could be a frame name string, or frame class.
def _get_frame_class(frame): """ Get a frame class from the input `frame`, which could be a frame name string, or frame class. """ if isinstance(frame, str): frame_names = frame_transform_graph.get_names() if frame not in frame_names: raise ValueError( f'Coordinate frame name "{frame}" is not a known ' f"coordinate frame ({sorted(frame_names)})" ) frame_cls = frame_transform_graph.lookup_name(frame) elif isinstance(frame, type) and issubclass(frame, BaseCoordinateFrame): frame_cls = frame else: raise ValueError( "Coordinate frame must be a frame name or frame class, not a" f" '{frame.__class__.__name__}'" ) return frame_cls
Determines the coordinate frame from input SkyCoord args and kwargs. This function extracts (removes) all frame attributes from the kwargs and determines the frame class either using the kwargs, or using the first element in the args (if a single frame object is passed in, for example). This function allows a frame to be specified as a string like 'icrs' or a frame class like ICRS, or an instance ICRS(), as long as the instance frame attributes don't conflict with kwargs passed in (which could require a three-way merge with the coordinate data possibly specified via the args).
def _get_frame_without_data(args, kwargs): """ Determines the coordinate frame from input SkyCoord args and kwargs. This function extracts (removes) all frame attributes from the kwargs and determines the frame class either using the kwargs, or using the first element in the args (if a single frame object is passed in, for example). This function allows a frame to be specified as a string like 'icrs' or a frame class like ICRS, or an instance ICRS(), as long as the instance frame attributes don't conflict with kwargs passed in (which could require a three-way merge with the coordinate data possibly specified via the args). """ from .sky_coordinate import SkyCoord # We eventually (hopefully) fill and return these by extracting the frame # and frame attributes from the input: frame_cls = None frame_cls_kwargs = {} # The first place to check: the frame could be specified explicitly frame = kwargs.pop("frame", None) if isinstance(frame, SkyCoord): # If the frame was passed as a SkyCoord, we also want to preserve # any extra attributes (e.g., obstime) if they are not already # specified in the kwargs. We preserve these extra attributes by # adding them to the kwargs dict: for attr in frame._extra_frameattr_names: value = getattr(frame, attr) if attr not in kwargs: kwargs[attr] = value elif np.any(value != kwargs[attr]): # This SkyCoord attribute passed in with the frame= object # conflicts with an attribute passed in directly to the # SkyCoord initializer as a kwarg: raise ValueError( _conflict_err_msg.format(attr, value, kwargs[attr], "SkyCoord") ) frame = frame.frame if isinstance(frame, BaseCoordinateFrame): # Extract any frame attributes for attr in frame.frame_attributes: # If the frame was specified as an instance, we have to make # sure that no frame attributes were specified as kwargs - this # would require a potential three-way merge: if attr in kwargs: raise ValueError( f"Cannot specify frame attribute '{attr}' directly as an" " argument to SkyCoord because a frame instance was passed in." " Either pass a frame class, or modify the frame attributes of" " the input frame instance." ) if not frame.is_frame_attr_default(attr): kwargs[attr] = getattr(frame, attr) frame_cls = frame.__class__ # Make sure we propagate representation/differential _type choices, # unless these are specified directly in the kwargs: kwargs.setdefault("representation_type", frame.representation_type) kwargs.setdefault("differential_type", frame.differential_type) elif frame_cls is None and frame is not None: # frame probably a string frame_cls = _get_frame_class(frame) # Check that the new frame doesn't conflict with existing coordinate frame # if a coordinate is supplied in the args list. If the frame still had not # been set by this point and a coordinate was supplied, then use that frame. for arg in args: # this catches the "single list passed in" case. For that case we want # to allow the first argument to set the class. 
That's OK because # _parse_coordinate_arg goes and checks that the frames match between # the first and all the others if isinstance(arg, (Sequence, np.ndarray)) and len(args) == 1 and len(arg) > 0: arg = arg[0] if isinstance(arg, BaseCoordinateFrame): coord_frame_obj = arg elif isinstance(arg, SkyCoord): coord_frame_obj = arg.frame else: continue coord_frame_cls = type(coord_frame_obj) if frame_cls is None: frame_cls = coord_frame_cls elif frame_cls is not coord_frame_cls: raise ValueError( f"Cannot override frame='{coord_frame_cls.__name__}' of input " f"coordinate with new frame='{frame_cls.__name__}'. Instead, " "transform the coordinate." ) for attr in coord_frame_obj.frame_attributes: if not coord_frame_obj.is_frame_attr_default(attr): value = getattr(coord_frame_obj, attr) if attr not in kwargs: kwargs[attr] = value elif np.any(value != kwargs[attr]): raise ValueError( f"Frame attribute '{attr}' has conflicting values between the" " input coordinate data and either keyword arguments or the " f"frame specification (frame=...): {value} =/= {kwargs[attr]}" ) if frame_cls is None: from .builtin_frames import ICRS frame_cls = ICRS # By now, frame_cls should be set - if it's not, something went wrong if not issubclass(frame_cls, BaseCoordinateFrame): # We should hopefully never get here... raise ValueError(f"Frame class has unexpected type: {frame_cls.__name__}") for attr in frame_cls.frame_attributes: if attr in kwargs: frame_cls_kwargs[attr] = kwargs.pop(attr) if (representation_type := kwargs.pop("representation_type", None)) is not None: frame_cls_kwargs["representation_type"] = _get_repr_cls(representation_type) if (differential_type := kwargs.pop("differential_type", None)) is not None: frame_cls_kwargs["differential_type"] = _get_diff_cls(differential_type) return frame_cls, frame_cls_kwargs
Extract coordinate data from the args and kwargs passed to SkyCoord. By this point, we assume that all of the frame attributes have been extracted from kwargs (see _get_frame_without_data()), so all that are left are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in any of the valid ways.
def _parse_coordinate_data(frame, args, kwargs): """ Extract coordinate data from the args and kwargs passed to SkyCoord. By this point, we assume that all of the frame attributes have been extracted from kwargs (see _get_frame_without_data()), so all that are left are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in any of the valid ways. """ valid_skycoord_kwargs = {} info = None # Look through the remaining kwargs to see if any are valid attribute names # by asking the frame transform graph: for attr in tuple(kwargs): if attr in frame_transform_graph.frame_attributes: valid_skycoord_kwargs[attr] = kwargs.pop(attr) # By this point in parsing the arguments, anything left in the args and # kwargs should be data. Either as individual components, or a list of # objects, or a representation, etc. # Get units of components units = _get_representation_component_units(args, kwargs) # Grab any frame-specific attr names like `ra` or `l` or `distance` from # kwargs and move them to valid_components. valid_components = _get_representation_attrs(frame, units, kwargs) # Error if anything is still left in kwargs if kwargs: # The next few lines add a more user-friendly error message to a # common and confusing situation when the user specifies, e.g., # `pm_ra` when they really should be passing `pm_ra_cosdec`. The # extra error should only turn on when the positional representation # is spherical, and when the component 'pm_<lon>' is passed. pm_message = "" if frame.representation_type == SphericalRepresentation: lon_name, lat_name, _ = frame.get_representation_component_names() if f"pm_{lon_name}" in kwargs: pm_message = ( "\n\n By default, most frame classes expect the longitudinal proper" " motion to include the cos(latitude) term, named" f" `pm_{lon_name}_cos{lat_name}`. Did you mean to pass in this" " component?" ) raise ValueError( "Unrecognized keyword argument(s) {}{}".format( ", ".join(f"'{key}'" for key in kwargs), pm_message ) ) if len(args) > 3: raise ValueError( f"Must supply no more than three positional arguments, got {len(args)}" ) # Finally deal with the unnamed args. This figures out what the arg[0] # is and returns a dict with appropriate key/values for initializing # frame class. Note that differentials are *never* valid args, only # kwargs. So they are not accounted for here (unless they're in a frame # or SkyCoord object) _skycoord_kwargs = {} _components = {} if len(args) > 1: for arg, (frame_attr_name, repr_attr_name), unit in zip( args, frame.representation_component_names.items(), units ): attr_class = frame.representation_type.attr_classes[repr_attr_name] _components[frame_attr_name] = attr_class(arg, unit=unit) elif len(args) == 1: # One arg which must be a coordinate. In this case coord_kwargs # will contain keys like 'ra', 'dec', 'distance' along with any # frame attributes like equinox or obstime which were explicitly # specified in the coordinate object (i.e. non-default). _skycoord_kwargs, _components = _parse_coordinate_arg(args[0], frame, units) # Copy other 'info' attr only if it has actually been defined. if "info" in getattr(args[0], "__dict__", ()): info = args[0].info # The next two loops copy the component and skycoord attribute data into # their final, respective "valid_" dictionaries. 
For each, we check that # there are no relevant conflicts with values specified by the user # through other means: # First validate the component data for attr, coord_value in _components.items(): if attr in valid_components: raise ValueError( _conflict_err_msg.format( attr, coord_value, valid_components[attr], "SkyCoord" ) ) valid_components[attr] = coord_value # Now validate the custom SkyCoord attributes for attr, value in _skycoord_kwargs.items(): if attr in valid_skycoord_kwargs and np.any( valid_skycoord_kwargs[attr] != value ): raise ValueError( _conflict_err_msg.format( attr, value, valid_skycoord_kwargs[attr], "SkyCoord" ) ) valid_skycoord_kwargs[attr] = value return valid_skycoord_kwargs, valid_components, info
Get the unit from kwargs for the *representation* components (not the differentials).
def _get_representation_component_units(args, kwargs): """ Get the unit from kwargs for the *representation* components (not the differentials). """ if "unit" not in kwargs: units = [None, None, None] else: units = kwargs.pop("unit") if isinstance(units, str): units = [x.strip() for x in units.split(",")] # Allow for input like unit='deg' or unit='m' if len(units) == 1: units = [units[0], units[0], units[0]] elif isinstance(units, (Unit, IrreducibleUnit)): units = [units, units, units] try: units = [(Unit(x) if x else None) for x in units] units.extend(None for x in range(3 - len(units))) if len(units) > 3: raise ValueError() except Exception as err: raise ValueError( "Unit keyword must have one to three unit values as " "tuple or comma-separated string." ) from err return units
Find instances of the "representation attributes" for specifying data for this frame. Pop them off of kwargs, run through the appropriate class constructor (to validate and apply unit), and put into the output valid_kwargs. "Representation attributes" are the frame-specific aliases for the underlying data values in the representation, e.g. "ra" for "lon" for many equatorial spherical representations, or "w" for "x" in the cartesian representation of Galactic. This also gets any *differential* kwargs, because they go into the same frame initializer later on.
def _get_representation_attrs(frame, units, kwargs): """ Find instances of the "representation attributes" for specifying data for this frame. Pop them off of kwargs, run through the appropriate class constructor (to validate and apply unit), and put into the output valid_kwargs. "Representation attributes" are the frame-specific aliases for the underlying data values in the representation, e.g. "ra" for "lon" for many equatorial spherical representations, or "w" for "x" in the cartesian representation of Galactic. This also gets any *differential* kwargs, because they go into the same frame initializer later on. """ frame_attr_names = frame.representation_component_names.keys() repr_attr_classes = frame.representation_type.attr_classes.values() valid_kwargs = {} for frame_attr_name, repr_attr_class, unit in zip( frame_attr_names, repr_attr_classes, units ): value = kwargs.pop(frame_attr_name, None) if value is not None: try: valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit) except u.UnitConversionError as err: error_message = ( f"Unit '{unit}' ({unit.physical_type}) could not be applied to" f" '{frame_attr_name}'. This can occur when passing units for some" " coordinate components when other components are specified as" " Quantity objects. Either pass a list of units for all components" " (and unit-less coordinate data), or pass Quantities for all" " components." ) raise u.UnitConversionError(error_message) from err # also check the differentials. They aren't included in the units keyword, # so we only look for the names. differential_type = frame.differential_type if differential_type is not None: for frame_name, repr_name in frame.get_representation_component_names( "s" ).items(): diff_attr_class = differential_type.attr_classes[repr_name] value = kwargs.pop(frame_name, None) if value is not None: valid_kwargs[frame_name] = diff_attr_class(value) return valid_kwargs
Parse longitude-like and latitude-like values from a string. Currently the following formats are always supported: * space separated 2-value or 6-value format If the input can be assumed to represent an RA and Dec then the following are additionally supported: * space separated <6-value format, this requires a plus or minus sign separation between RA and Dec * sign separated format * JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits * JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits Parameters ---------- coord_str : str Coordinate string to parse. is_radec : bool, keyword-only Whether the coordinates represent an RA and Dec. Returns ------- longitude-like, latitude-like : str Parsed coordinate values. If ``is_radec`` is `True` then they are RA and Dec.
def _parse_one_coord_str(coord_str: str, *, is_radec: bool = True) -> tuple[str, str]: """Parse longitude-like and latitude-like values from a string. Currently the following formats are always supported: * space separated 2-value or 6-value format If the input can be assumed to represent an RA and Dec then the following are additionally supported: * space separated <6-value format, this requires a plus or minus sign separation between RA and Dec * sign separated format * JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits * JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits Parameters ---------- coord_str : str Coordinate string to parse. is_radec : bool, keyword-only Whether the coordinates represent an RA and Dec. Returns ------- longitude-like, latitude-like : str Parsed coordinate values. If ``is_radec`` is `True` then they are RA and Dec. """ if not isinstance(coord_str, str): # This exception should never be raised from SkyCoord raise TypeError("coord_str must be a single str") split_coord = coord_str.split() if len(split_coord) == 6: return " ".join(split_coord[:3]), " ".join(split_coord[3:]) if len(split_coord) == 2 or not is_radec: return tuple(split_coord) if len(split_coord) == 1 and (match_j := J_PREFIXED_RA_DEC_RE.match(coord_str)): ra, dec = match_j.groups() if len(ra.split(".", 1)[0]) == 7: ra = f"{ra[0:3]} {ra[3:5]} {ra[5:]}" else: ra = f"{ra[0:2]} {ra[2:4]} {ra[4:]}" return ra, f"{dec[0:3]} {dec[3:5]} {dec[5:]}" ra, *dec = PLUS_MINUS_RE.split(coord_str) return ra, " ".join(dec)
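Illustrative values showing the accepted formats; ``_parse_one_coord_str`` is a private helper and is assumed to be in scope here.

_parse_one_coord_str("1 2 3 4 5 6")           # -> ('1 2 3', '4 5 6')
_parse_one_coord_str("10.5 -0.5")             # -> ('10.5', '-0.5')
_parse_one_coord_str("J194428.1-420209.5")    # -> ('19 44 28.1', '-42 02 09.5')
_parse_one_coord_str("1h12m43.2s +1d12m43s")  # -> ('1h12m43.2s', '+1d12m43s')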
Try importing jplephem, download/retrieve from cache the Satellite Planet Kernel corresponding to the given ephemeris.
def _get_kernel(value): """ Try importing jplephem, download/retrieve from cache the Satellite Planet Kernel corresponding to the given ephemeris. """ if value is None or value.lower() == "builtin": return None try: from jplephem.spk import SPK except ImportError: raise ImportError( "Solar system JPL ephemeris calculations require the jplephem package " "(https://pypi.org/project/jplephem/)" ) if value.lower() == "jpl": # Get the default JPL ephemeris URL value = DEFAULT_JPL_EPHEMERIS if re.compile(r"de[0-9][0-9][0-9]s?").match(value.lower()): value = ( "https://naif.jpl.nasa.gov/pub/naif/generic_kernels" f"/spk/planets/{value.lower():s}.bsp" ) elif os.path.isfile(value): return SPK.open(value) else: try: urlparse(value) except Exception: raise ValueError( f"{value} was not one of the standard strings and " "could not be parsed as a file path or URL" ) return SPK.open(download_file(value, cache=True))
Calculate the barycentric position (and velocity) of a solar system body. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` get_velocity : bool, optional Whether or not to calculate the velocity as well as the position. Returns ------- position : `~astropy.coordinates.CartesianRepresentation` or tuple Barycentric (ICRS) position or tuple of position and velocity. Notes ----- Whether or not velocities are calculated makes little difference for the built-in ephemerides, but for most JPL ephemeris files, the execution time roughly doubles.
def _get_body_barycentric_posvel(body, time, ephemeris=None, get_velocity=True): """Calculate the barycentric position (and velocity) of a solar system body. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` get_velocity : bool, optional Whether or not to calculate the velocity as well as the position. Returns ------- position : `~astropy.coordinates.CartesianRepresentation` or tuple Barycentric (ICRS) position or tuple of position and velocity. Notes ----- Whether or not velocities are calculated makes little difference for the built-in ephemerides, but for most JPL ephemeris files, the execution time roughly doubles. """ # If the ephemeris is to be taken from solar_system_ephemeris, or the one # it already contains, use the kernel there. Otherwise, open the ephemeris, # possibly downloading it, but make sure the file is closed at the end. default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value kernel = None try: if default_kernel: if solar_system_ephemeris.get() is None: raise ValueError(cleandoc(_EPHEMERIS_NOTE)) kernel = solar_system_ephemeris.kernel else: kernel = _get_kernel(ephemeris) jd1, jd2 = get_jd12(time, "tdb") if kernel is None: body = body.lower() earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2) if body == "earth": body_pv_bary = earth_pv_bary elif body == "moon": # The moon98 documentation notes that it takes TT, but that TDB leads # to errors smaller than the uncertainties in the algorithm. # moon98 returns the astrometric position relative to the Earth. moon_pv_geo = erfa.moon98(jd1, jd2) body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary) else: sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio) if body == "sun": body_pv_bary = sun_pv_bary else: try: body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body] except KeyError: raise KeyError( f"{body}'s position and velocity cannot be " f"calculated with the '{ephemeris}' ephemeris." ) body_pv_helio = erfa.plan94(jd1, jd2, body_index) body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary) body_pos_bary = CartesianRepresentation( body_pv_bary["p"], unit=u.au, xyz_axis=-1, copy=COPY_IF_NEEDED ) if get_velocity: body_vel_bary = CartesianRepresentation( body_pv_bary["v"], unit=u.au / u.day, xyz_axis=-1, copy=COPY_IF_NEEDED, ) else: if isinstance(body, str): # Look up kernel chain for JPL ephemeris, based on name try: kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()] except KeyError: raise KeyError( f"{body}'s position cannot be calculated with " f"the {ephemeris} ephemeris." ) else: # otherwise, assume the user knows what their doing and intentionally # passed in a kernel chain kernel_spec = body # jplephem cannot handle multi-D arrays, so convert to 1D here. jd1_shape = getattr(jd1, "shape", ()) if len(jd1_shape) > 1: jd1, jd2 = jd1.ravel(), jd2.ravel() # Note that we use the new jd1.shape here to create a 1D result array. # It is reshaped below. body_posvel_bary = np.zeros( (2 if get_velocity else 1, 3) + getattr(jd1, "shape", ()) ) for pair in kernel_spec: spk = kernel[pair] if spk.data_type == 3: # Type 3 kernels contain both position and velocity. 
posvel = spk.compute(jd1, jd2) if get_velocity: body_posvel_bary += posvel.reshape(body_posvel_bary.shape) else: body_posvel_bary[0] += posvel[:3] else: # spk.generate first yields the position and then the # derivative. If no velocities are desired, body_posvel_bary # has only one element and thus the loop ends after a single # iteration, avoiding the velocity calculation. for body_p_or_v, p_or_v in zip( body_posvel_bary, spk.generate(jd1, jd2) ): body_p_or_v += p_or_v body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape body_pos_bary = CartesianRepresentation( body_posvel_bary[0], unit=u.km, copy=False ) if get_velocity: body_vel_bary = CartesianRepresentation( body_posvel_bary[1], unit=u.km / u.day, copy=False ) return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary finally: if not default_kernel and kernel is not None: kernel.daf.file.close()
Calculate the barycentric position and velocity of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation` Tuple of barycentric (ICRS) position and velocity. See Also -------- get_body_barycentric : to calculate position only. This is faster by about a factor two for JPL kernels, but has no speed advantage for the built-in ephemeris. Notes ----- {_EPHEMERIS_NOTE}
def get_body_barycentric_posvel(body, time, ephemeris=None): """Calculate the barycentric position and velocity of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation` Tuple of barycentric (ICRS) position and velocity. See Also -------- get_body_barycentric : to calculate position only. This is faster by about a factor two for JPL kernels, but has no speed advantage for the built-in ephemeris. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris)
Calculate the barycentric position of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) position of the body in cartesian coordinates See Also -------- get_body_barycentric_posvel : to calculate both position and velocity. Notes ----- {_EPHEMERIS_NOTE}
def get_body_barycentric(body, time, ephemeris=None): """Calculate the barycentric position of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) position of the body in cartesian coordinates See Also -------- get_body_barycentric_posvel : to calculate both position and velocity. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False)
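A minimal usage sketch with the built-in ephemeris (no download needed); the date is illustrative.

from astropy.time import Time
from astropy.coordinates import get_body_barycentric

t = Time("2021-01-01T00:00:00")
pos = get_body_barycentric("earth", t)  # CartesianRepresentation in au
print(pos.norm().to("au"))              # roughly 1 au from the solar system barycenter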
Calculate the apparent position of body ``body`` relative to Earth. This corrects for the light-travel time to the object. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``~astropy.coordinates.solar_system_ephemeris.set`` obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional The GCRS position of the observer Returns ------- cartesian_position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) apparent position of the body in cartesian coordinates Notes ----- {_EPHEMERIS_NOTE}
def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None): """Calculate the apparent position of body ``body`` relative to Earth. This corrects for the light-travel time to the object. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``~astropy.coordinates.solar_system_ephemeris.set`` obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional The GCRS position of the observer Returns ------- cartesian_position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) apparent position of the body in cartesian coordinates Notes ----- {_EPHEMERIS_NOTE} """ if ephemeris is None: ephemeris = solar_system_ephemeris.get() # Calculate position given approximate light travel time. delta_light_travel_time = 20.0 * u.s emitted_time = time light_travel_time = 0.0 * u.s earth_loc = get_body_barycentric("earth", time, ephemeris) if obsgeoloc is not None: earth_loc += obsgeoloc while np.any(np.fabs(delta_light_travel_time) > 1.0e-8 * u.s): body_loc = get_body_barycentric(body, emitted_time, ephemeris) earth_distance = (body_loc - earth_loc).norm() delta_light_travel_time = light_travel_time - earth_distance / speed_of_light light_travel_time = earth_distance / speed_of_light emitted_time = time - light_travel_time return get_body_barycentric(body, emitted_time, ephemeris)
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. location : `~astropy.coordinates.EarthLocation`, optional Location of observer on the Earth. If not given, will be taken from ``time`` (if not present, a geocentric observer will be assumed). ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the body Notes ----- The coordinate returned is the apparent position, which is the position of the body at time *t* minus the light travel time from the *body* to the observing *location*. {_EPHEMERIS_NOTE}
def get_body(body, time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. location : `~astropy.coordinates.EarthLocation`, optional Location of observer on the Earth. If not given, will be taken from ``time`` (if not present, a geocentric observer will be assumed). ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the body Notes ----- The coordinate returned is the apparent position, which is the position of the body at time *t* minus the light travel time from the *body* to the observing *location*. {_EPHEMERIS_NOTE} """ if location is None: location = time.location if location is not None: obsgeoloc, obsgeovel = location.get_gcrs_posvel(time) else: obsgeoloc, obsgeovel = None, None cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc) icrs = ICRS(cartrep) gcrs = icrs.transform_to( GCRS(obstime=time, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel) ) return SkyCoord(gcrs)
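A minimal usage sketch; the observing time and site below are illustrative, and the built-in ephemeris is used.

import astropy.units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, get_body

t = Time("2021-01-01T00:00:00")
site = EarthLocation(lon=-70.0 * u.deg, lat=-30.0 * u.deg, height=2400.0 * u.m)

# Apparent GCRS position of Jupiter as seen by this observer.
jupiter = get_body("jupiter", t, location=site)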
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- time : `~astropy.time.Time` Time of observation location : `~astropy.coordinates.EarthLocation` Location of observer on the Earth. If none is supplied, taken from ``time`` (if not present, a geocentric observer will be assumed). ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the Moon Notes ----- The coordinate returned is the apparent position, which is the position of the moon at time *t* minus the light travel time from the moon to the observing *location*. {_EPHEMERIS_NOTE}
def get_moon(time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- time : `~astropy.time.Time` Time of observation location : `~astropy.coordinates.EarthLocation` Location of observer on the Earth. If none is supplied, taken from ``time`` (if not present, a geocentric observer will be assumed). ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the Moon Notes ----- The coordinate returned is the apparent position, which is the position of the moon at time *t* minus the light travel time from the moon to the observing *location*. {_EPHEMERIS_NOTE} """ return get_body("moon", time, location=location, ephemeris=ephemeris)
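A short usage sketch (the date is illustrative): with no location supplied and none attached to the time, a geocentric observer is assumed.

from astropy.time import Time

t = Time("2021-03-21 00:00:00")
moon = get_moon(t)  # no location given, so a geocentric observer is assumed
print(moon.ra, moon.dec, moon.distance)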
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity.
def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif ( squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m) ): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError( f"Unexpected units in velocity shift: {squantity.unit}. This should not" " happen, so please report this in the astropy issue tracker!" )
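A back-of-the-envelope check of the wavelength branch above (a sketch, not part of the module): a source receding at 300 km/s redshifts H-alpha by roughly 0.66 nm.

import numpy as np
import astropy.units as u
from astropy.constants import c

wave = 656.28 * u.nm           # H-alpha rest wavelength
beta = (300 * u.km / u.s) / c  # positive velocity -> redshift
shifted = wave * np.sqrt((1 + beta) / (1 - beta))
print(shifted - wave)          # ~0.66 nm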
Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_observer_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference.
def update_differentials_to_match(
    original, velocity_reference, preserve_observer_frame=False
):
    """
    Given an original coordinate object, update the differentials so that
    the final coordinate is at the same location as the original coordinate
    but co-moving with the velocity reference object.

    If preserve_observer_frame is set to True, the resulting object will be in
    the frame of the original coordinate, otherwise it will be in the frame of
    the velocity reference.
    """
    if not velocity_reference.data.differentials:
        raise ValueError("Reference frame has no velocities")

    # If the reference has an obstime already defined, we should ignore
    # it and stick with the original observer obstime.
    if "obstime" in velocity_reference.frame_attributes and hasattr(
        original, "obstime"
    ):
        velocity_reference = velocity_reference.replicate(obstime=original.obstime)

    # We transform both coordinates to ICRS for simplicity and because we know
    # it's a simple frame that is not time-dependent (it could be that both
    # the original and velocity_reference frame are time-dependent)
    original_icrs = original.transform_to(ICRS())
    velocity_reference_icrs = velocity_reference.transform_to(ICRS())

    differentials = velocity_reference_icrs.data.represent_as(
        CartesianRepresentation, CartesianDifferential
    ).differentials

    data_with_differentials = original_icrs.data.represent_as(
        CartesianRepresentation
    ).with_differentials(differentials)

    final_icrs = original_icrs.realize_frame(data_with_differentials)

    if preserve_observer_frame:
        final = final_icrs.transform_to(original)
    else:
        final = final_icrs.transform_to(velocity_reference)

    return final.replicate(
        representation_type=CartesianRepresentation,
        differential_type=CartesianDifferential,
    )
Set the differentials to be stationary on a coordinate object.
def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data)
Return any Angle subclass objects as Angle objects. This is used to ensure that Latitude and Longitude change to Angle objects when they are used in calculations (such as lon/2.)
def _no_angle_subclass(obj):
    """Return any Angle subclass objects as Angle objects.

    This is used to ensure that Latitude and Longitude change to Angle
    objects when they are used in calculations (such as lon/2.)
    """
    if isinstance(obj, tuple):
        return tuple(_no_angle_subclass(_obj) for _obj in obj)

    return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
Checks that the given value is in the range [-24,24]. If the value is equal to -24 or 24, then a warning is raised.
def _check_hour_range(hrs: float) -> None: """ Checks that the given value is in the range [-24,24]. If the value is equal to -24 or 24, then a warning is raised. """ if not -24.0 < hrs < 24.0: if abs(hrs) != 24.0: raise IllegalHourError(hrs) warn(IllegalHourWarning(hrs, "Treating as 24 hr"))
Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised.
def _check_minute_range(m: float) -> None: """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if not 0.0 <= m < 60.0: if m != 60.0: raise IllegalMinuteError(m) warn(IllegalMinuteWarning(m, "Treating as 0 min, +1 hr/deg"))
Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised.
def _check_second_range(sec: float) -> None: """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if not 0.0 <= sec < 60.0: if sec != 60.0: raise IllegalSecondError(sec) warn(IllegalSecondWarning(sec, "Treating as 0 sec, +1 min"))
Parses an input string value into an angle value. Parameters ---------- angle : str A string representing the angle. May be in one of the following forms: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s unit : `~astropy.units.UnitBase` instance, optional The unit used to interpret the string. If ``unit`` is not provided, the unit must be explicitly represented in the string, either at the end or as number separators. debug : bool, optional If `True`, print debugging information from the parser. Returns ------- value, unit : tuple ``value`` is the value as a floating point number or three-part tuple, and ``unit`` is a `Unit` instance which is either the unit passed in or the one explicitly mentioned in the input string.
def parse_angle(angle, unit=None, debug=False): """ Parses an input string value into an angle value. Parameters ---------- angle : str A string representing the angle. May be in one of the following forms: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s unit : `~astropy.units.UnitBase` instance, optional The unit used to interpret the string. If ``unit`` is not provided, the unit must be explicitly represented in the string, either at the end or as number separators. debug : bool, optional If `True`, print debugging information from the parser. Returns ------- value, unit : tuple ``value`` is the value as a floating point number or three-part tuple, and ``unit`` is a `Unit` instance which is either the unit passed in or the one explicitly mentioned in the input string. """ return _AngleParser().parse(angle, unit, debug=debug)
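The parser is internal, but the public Angle class exercises the same parsing, so the accepted formats can be checked like this (a sketch; values in comments are approximate):

import astropy.units as u
from astropy.coordinates import Angle

Angle("1d2m3s")                   # ~1.0342 deg
Angle("01:02:30.43", unit=u.deg)  # separators interpreted as deg/arcmin/arcsec
Angle("-1h2m3s")                  # ~-1.0342 hourangle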
Convert a floating-point input to a 3-tuple
- if input is in degrees, the result is (degree, arcminute, arcsecond)
- if input is in hourangle, the result is (hour, minute, second)
def _decimal_to_sexagesimal(a, /):
    """
    Convert a floating-point input to a 3-tuple
    - if input is in degrees, the result is (degree, arcminute, arcsecond)
    - if input is in hourangle, the result is (hour, minute, second)
    """
    sign = np.copysign(1.0, a)
    # assuming a in degree, these are (degree fraction, degree)
    (df, d) = np.modf(np.fabs(a))
    # assuming a in degree, these are (arcminute fraction, arcminute)
    (mf, m) = np.modf(df * 60.0)
    s = mf * 60.0
    return np.floor(sign * d), sign * np.floor(m), sign * s
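A worked example of the decomposition above (a quick sketch, assuming the input is in degrees): -10.2625 deg splits into 10 deg, 15.75 arcmin, i.e. (-10, -15, -45) up to float rounding.

import numpy as np

d, m, s = _decimal_to_sexagesimal(-10.2625)
assert np.allclose([d, m, s], [-10.0, -15.0, -45.0])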
Given a floating point angle, convert it to string
def _decimal_to_sexagesimal_string( angle, precision=None, pad=False, sep=(":",), fields=3 ): """ Given a floating point angle, convert it to string """ values = _decimal_to_sexagesimal(angle) # Check to see if values[0] is negative, using np.copysign to handle -0 sign = np.copysign(1.0, values[0]) # If the coordinates are negative, we need to take the absolute values. # We use np.abs because abs(-0) is -0 # TODO: Is this true? (MHvK, 2018-02-01: not on my system) values = [np.abs(value) for value in values] if pad: pad = 3 if sign == -1 else 2 else: pad = 0 if not isinstance(sep, tuple): sep = tuple(sep) if fields < 1 or fields > 3: raise ValueError("fields must be 1, 2, or 3") if not sep: # empty string, False, or None, etc. sep = ("", "", "") elif len(sep) == 1: if fields == 3: sep = sep + (sep[0], "") elif fields == 2: sep = sep + ("", "") else: sep = ("", "", "") elif len(sep) == 2: sep = sep + ("",) elif len(sep) != 3: raise ValueError( "Invalid separator specification for converting angle to string." ) # Simplify the expression based on the requested precision. For # example, if the seconds will round up to 60, we should convert # it to 0 and carry upwards. If the field is hidden (by the # fields kwarg) we round up around the middle, 30.0. rounding_thresh = 60.0 - (10.0 ** -(8 if precision is None else precision)) if fields == 3 and values[2] >= rounding_thresh: values[2] = 0.0 values[1] += 1.0 elif fields < 3 and values[2] >= 30.0: values[1] += 1.0 if fields >= 2 and values[1] >= 60.0: values[1] = 0.0 values[0] += 1.0 elif fields < 2 and values[1] >= 30.0: values[0] += 1.0 literal = f"{np.copysign(values[0], sign):0{pad}.0f}{sep[0]}" if fields >= 2: literal += f"{int(values[1]):02d}{sep[1]}" if fields == 3: if precision is None: last_value = f"{abs(values[2]):.8f}".rstrip("0").rstrip(".") else: last_value = f"{abs(values[2]):.{precision}f}" if len(last_value) == 1 or last_value[1] == ".": last_value = "0" + last_value literal += f"{last_value}{sep[2]}" return literal
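For illustration (a sketch, not from the source), the formatter above renders the same angle as a dms-style string:

s = _decimal_to_sexagesimal_string(10.2625, precision=1, sep=("d", "m", "s"))
print(s)  # '10d15m45.0s'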
Angular separation between two points on a sphere.

Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
    Longitude and latitude of the two points. Quantities should be in
    angular units; floats in radians.

Returns
-------
angular separation : `~astropy.units.Quantity` ['angle'] or float
    Type depends on input; ``Quantity`` in angular units, or float in
    radians.

Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
some alternatives, but is stable at all distances, including the
poles and antipodes.

.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
def angular_separation(lon1, lat1, lon2, lat2):
    """
    Angular separation between two points on a sphere.

    Parameters
    ----------
    lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the two points. Quantities should be in
        angular units; floats in radians.

    Returns
    -------
    angular separation : `~astropy.units.Quantity` ['angle'] or float
        Type depends on input; ``Quantity`` in angular units, or float in
        radians.

    Notes
    -----
    The angular separation is calculated using the Vincenty formula [1]_,
    which is slightly more complex and computationally expensive than
    some alternatives, but is stable at all distances, including the
    poles and antipodes.

    .. [1] https://en.wikipedia.org/wiki/Great-circle_distance
    """
    sdlon = np.sin(lon2 - lon1)
    cdlon = np.cos(lon2 - lon1)
    slat1 = np.sin(lat1)
    slat2 = np.sin(lat2)
    clat1 = np.cos(lat1)
    clat2 = np.cos(lat2)

    num1 = clat2 * sdlon
    num2 = clat1 * slat2 - slat1 * clat2 * cdlon
    denominator = slat1 * slat2 + clat1 * clat2 * cdlon

    return np.arctan2(np.hypot(num1, num2), denominator)
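A quick sanity check of the formula (sketch only): two points 90 degrees apart on the equator.

import astropy.units as u

sep = angular_separation(0 * u.deg, 0 * u.deg, 90 * u.deg, 0 * u.deg)
print(sep.to(u.deg))  # 90 deg (result comes back in radians as a Quantity)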
Position Angle (East of North) between two points on a sphere. Parameters ---------- lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float Longitude and latitude of the two points. Quantities should be in angular units; floats in radians. Returns ------- pa : `~astropy.coordinates.Angle` The (positive) position angle of the vector pointing from position 1 to position 2. If any of the angles are arrays, this will contain an array following the appropriate `numpy` broadcasting rules.
def position_angle(lon1, lat1, lon2, lat2): """ Position Angle (East of North) between two points on a sphere. Parameters ---------- lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float Longitude and latitude of the two points. Quantities should be in angular units; floats in radians. Returns ------- pa : `~astropy.coordinates.Angle` The (positive) position angle of the vector pointing from position 1 to position 2. If any of the angles are arrays, this will contain an array following the appropriate `numpy` broadcasting rules. """ from .core import Angle deltalon = lon2 - lon1 colat = np.cos(lat2) x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon) y = np.sin(deltalon) * colat return Angle(np.arctan2(y, x), u.radian).wrap_at(360 * u.deg)
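For example (a sketch): a point due east of another on the equator has a position angle of 90 degrees, since position angle is measured East of North.

import astropy.units as u

pa = position_angle(10 * u.deg, 0 * u.deg, 11 * u.deg, 0 * u.deg)
print(pa.to(u.deg))  # ~90 deg (position 2 lies due east of position 1)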
Point with the given offset from the given point. Parameters ---------- lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float Longitude and latitude of the starting point, position angle and distance to the final point. Quantities should be in angular units; floats in radians. Polar points at lat= +/-90 are treated as limit of +/-(90-epsilon) and same lon. Returns ------- lon, lat : `~astropy.coordinates.Angle` The position of the final point. If any of the angles are arrays, these will contain arrays following the appropriate `numpy` broadcasting rules. 0 <= lon < 2pi.
def offset_by(lon, lat, posang, distance): """ Point with the given offset from the given point. Parameters ---------- lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float Longitude and latitude of the starting point, position angle and distance to the final point. Quantities should be in angular units; floats in radians. Polar points at lat= +/-90 are treated as limit of +/-(90-epsilon) and same lon. Returns ------- lon, lat : `~astropy.coordinates.Angle` The position of the final point. If any of the angles are arrays, these will contain arrays following the appropriate `numpy` broadcasting rules. 0 <= lon < 2pi. """ from .core import Angle # Calculations are done using the spherical trigonometry sine and cosine rules # of the triangle A at North Pole, B at starting point, C at final point # with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang) # with sides a (distance), b (final co-latitude), c (starting colatitude) # B, a, c are knowns; A and b are unknowns # https://en.wikipedia.org/wiki/Spherical_trigonometry cos_a = np.cos(distance) sin_a = np.sin(distance) cos_c = np.sin(lat) sin_c = np.cos(lat) cos_B = np.cos(posang) sin_B = np.sin(posang) # cosine rule: Know two sides: a,c and included angle: B; get unknown side b cos_b = cos_c * cos_a + sin_c * sin_a * cos_B # sin_b = np.sqrt(1 - cos_b**2) # sine rule and cosine rule for A (using both lets arctan2 pick quadrant). # multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors # at poles. Correct for the x=0 multiplication a few lines down. # sin_A/sin_a == sin_B/sin_b # Sine rule xsin_A = sin_a * sin_B * sin_c # cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule xcos_A = cos_a - cos_b * cos_c A = Angle(np.arctan2(xsin_A, xcos_A), u.radian) # Treat the poles as if they are infinitesimally far from pole but at given lon small_sin_c = sin_c < 1e-12 if small_sin_c.any(): # For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang A_pole = (90 * u.deg + cos_c * (90 * u.deg - Angle(posang, u.radian))).to(u.rad) if A.shape: # broadcast to ensure the shape is like that of A, which is also # affected by the (possible) shapes of lat, posang, and distance. small_sin_c = np.broadcast_to(small_sin_c, A.shape) A[small_sin_c] = A_pole[small_sin_c] else: A = A_pole outlon = (Angle(lon, u.radian) + A).wrap_at(360.0 * u.deg).to(u.deg) outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg) return outlon, outlat
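As a check of the spherical-triangle construction (sketch only): moving 10 degrees due north (position angle 0) from the origin of the graticule lands at lon 0, lat 10.

import astropy.units as u

lon, lat = offset_by(0 * u.deg, 0 * u.deg, posang=0 * u.deg, distance=10 * u.deg)
print(lon, lat)  # ~0 deg, ~10 deg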
Generate a grid of points on the surface of the unit sphere using the Fibonacci or Golden Spiral method. .. seealso:: `Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_ Parameters ---------- size : int The number of points to generate. Returns ------- rep : `~astropy.coordinates.UnitSphericalRepresentation` The grid of points.
def golden_spiral_grid(size): """Generate a grid of points on the surface of the unit sphere using the Fibonacci or Golden Spiral method. .. seealso:: `Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_ Parameters ---------- size : int The number of points to generate. Returns ------- rep : `~astropy.coordinates.UnitSphericalRepresentation` The grid of points. """ golden_r = (1 + 5**0.5) / 2 grid = np.arange(0, size, dtype=float) + 0.5 lon = _TWOPI / golden_r * grid * u.rad lat = np.arcsin(1 - 2 * grid / size) * u.rad return UnitSphericalRepresentation(lon, lat)
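Typical use (a sketch): generate a few hundred roughly uniformly spaced directions, e.g. to seed a coarse search grid over the sky.

rep = golden_spiral_grid(size=400)
print(rep.lon[:3], rep.lat[:3])  # UnitSphericalRepresentation with 400 points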
Generate a random sampling of points on the surface of the unit sphere. Parameters ---------- size : int The number of points to generate. Returns ------- rep : `~astropy.coordinates.UnitSphericalRepresentation` The random points.
def uniform_spherical_random_surface(size=1): """Generate a random sampling of points on the surface of the unit sphere. Parameters ---------- size : int The number of points to generate. Returns ------- rep : `~astropy.coordinates.UnitSphericalRepresentation` The random points. """ rng = np.random # can maybe switch to this being an input later - see #11628 lon = rng.uniform(0, _TWOPI, size) * u.rad lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad return UnitSphericalRepresentation(lon, lat)
Generate a random sampling of points that follow a uniform volume density distribution within a sphere. Parameters ---------- size : int The number of points to generate. max_radius : number, quantity-like, optional A dimensionless or unit-ful factor to scale the random distances. Returns ------- rep : `~astropy.coordinates.SphericalRepresentation` The random points.
def uniform_spherical_random_volume(size=1, max_radius=1): """Generate a random sampling of points that follow a uniform volume density distribution within a sphere. Parameters ---------- size : int The number of points to generate. max_radius : number, quantity-like, optional A dimensionless or unit-ful factor to scale the random distances. Returns ------- rep : `~astropy.coordinates.SphericalRepresentation` The random points. """ rng = np.random # can maybe switch to this being an input later - see #11628 usph = uniform_spherical_random_surface(size=size) r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=COPY_IF_NEEDED) return SphericalRepresentation(usph.lon, usph.lat, r)
Return the e-terms of the aberration vector.

Parameters
----------
equinox : Time object
    The equinox for which to compute the e-terms
def fk4_e_terms(equinox):
    """
    Return the e-terms of the aberration vector.

    Parameters
    ----------
    equinox : Time object
        The equinox for which to compute the e-terms
    """
    # Constant of aberration at J2000; from Explanatory Supplement to the
    # Astronomical Almanac (Seidelmann, 2005).
    k = 0.0056932  # in degrees (v_earth/c ~ 1e-4 rad ~ 0.0057 deg)
    k = np.radians(k)

    # Eccentricity of the Earth's orbit
    e = earth.eccentricity(equinox.jd)

    # Mean longitude of perigee of the solar orbit
    g = earth.mean_lon_of_perigee(equinox.jd)
    g = np.radians(g)

    # Obliquity of the ecliptic
    o = earth.obliquity(equinox.jd, algorithm=1980)
    o = np.radians(o)

    return (
        e * k * np.sin(g),
        -e * k * np.cos(g) * np.cos(o),
        -e * k * np.cos(g) * np.sin(o),
    )
This is a correction term in the FK4 transformations because FK4 is a rotating system - see Murray 89 eqn 29.
def _fk4_B_matrix(obstime): """ This is a correction term in the FK4 transformations because FK4 is a rotating system - see Murray 89 eqn 29. """ # Note this is *julian century*, not besselian T = (obstime.jyear - 1950.0) / 100.0 if getattr(T, "shape", ()): # Ensure we broadcast possibly arrays of times properly. T.shape += (1, 1) return _B1950_TO_J2000_M + _FK4_CORR * T
Get the rotation matrix and offset vectors that transform ICRS coordinates into the given Galactocentric frame. Use the ``inverse`` argument to get the inverse transformation, i.e. the matrix and offsets to go from Galactocentric to ICRS.
def get_matrix_vectors(galactocentric_frame, inverse=False):
    """
    Get the rotation matrix and offset vectors that transform ICRS coordinates
    into the given Galactocentric frame. Use the ``inverse`` argument to get
    the inverse transformation, i.e. the matrix and offsets to go from
    Galactocentric to ICRS.
    """
    # shorthand
    gcf = galactocentric_frame

    # rotation matrix to align x(ICRS) with the vector to the Galactic center
    mat1 = rotation_matrix(-gcf.galcen_coord.dec, "y")
    mat2 = rotation_matrix(gcf.galcen_coord.ra, "z")
    # extra roll away from the Galactic x-z plane
    mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, "x")

    # construct transformation matrix and use it
    R = mat0 @ mat1 @ mat2

    # Now need to translate by Sun-Galactic center distance around x' and
    # rotate about y' to account for tilt due to Sun's height above the plane
    translation = r.CartesianRepresentation(gcf.galcen_distance * [1.0, 0.0, 0.0])
    z_d = gcf.z_sun / gcf.galcen_distance
    H = rotation_matrix(-np.arcsin(z_d), "y")

    # compute total matrices
    A = H @ R

    # Now we re-align the translation vector to account for the Sun's height
    # above the midplane
    offset = -translation.transform(H)

    if inverse:
        # the inverse of a rotation matrix is a transpose, which is much faster
        # and more stable to compute
        A = matrix_transpose(A)
        offset = (-offset).transform(A)
        offset_v = r.CartesianDifferential.from_cartesian(
            (-gcf.galcen_v_sun).to_cartesian().transform(A)
        )
        offset = offset.with_differentials(offset_v)

    else:
        offset = offset.with_differentials(gcf.galcen_v_sun)

    return A, offset
B-matrix from USNO circular 179. Used by the ICRS->FK5 transformation functions.
def _icrs_to_fk5_matrix(): """ B-matrix from USNO circular 179. Used by the ICRS->FK5 transformation functions. """ eta0 = -19.9 / 3600000.0 xi0 = 9.1 / 3600000.0 da0 = -22.9 / 3600000.0 return ( rotation_matrix(-eta0, "x") @ rotation_matrix(xi0, "y") @ rotation_matrix(da0, "z") )
Compute the TETE to ITRS rotation matrix (Earth rotation plus polar motion) at the given time. If the nutation-precession matrix is already known, it should be passed in, as this is by far the most expensive calculation.
def tete_to_itrs_mat(time, rbpn=None):
    """Compute the TETE to ITRS rotation matrix (Earth rotation plus polar
    motion) at the given time.

    If the nutation-precession matrix is already known, it should be passed in,
    as this is by far the most expensive calculation.
    """
    xp, yp = get_polar_motion(time)
    sp = erfa.sp00(*get_jd12(time, "tt"))
    pmmat = erfa.pom00(xp, yp, sp)

    # now determine the greenwich apparent sidereal time for the input obstime
    # we use the 2006A model for consistency with RBPN matrix use in GCRS <-> TETE
    ujd1, ujd2 = get_jd12(time, "ut1")
    jd1, jd2 = get_jd12(time, "tt")
    if rbpn is None:
        # erfa.gst06a calls pnm06a to calculate rbpn and then gst06. Use it in
        # favour of getting rbpn with erfa.pnm06a to avoid a possibly large array.
        gast = erfa.gst06a(ujd1, ujd2, jd1, jd2)
    else:
        gast = erfa.gst06(ujd1, ujd2, jd1, jd2, rbpn)

    # c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix
    # because we're already in CIRS equivalent frame
    return erfa.c2tcio(np.eye(3), gast, pmmat)
Create a GCRS frame at the location and obstime. The reference frame z axis must point to the Celestial Intermediate Pole (as is the case for CIRS and TETE). This function is here to avoid location.get_gcrs(obstime), which would recalculate matrices that are already available below (and return a GCRS coordinate, rather than a frame with obsgeoloc and obsgeovel). Instead, it uses the private method that allows passing in the matrices.
def get_location_gcrs(location, obstime, ref_to_itrs, gcrs_to_ref): """Create a GCRS frame at the location and obstime. The reference frame z axis must point to the Celestial Intermediate Pole (as is the case for CIRS and TETE). This function is here to avoid location.get_gcrs(obstime), which would recalculate matrices that are already available below (and return a GCRS coordinate, rather than a frame with obsgeoloc and obsgeovel). Instead, it uses the private method that allows passing in the matrices. """ obsgeoloc, obsgeovel = location._get_gcrs_posvel(obstime, ref_to_itrs, gcrs_to_ref) return GCRS(obstime=obstime, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
Create a new class that is the sky offset frame for a specific class of origin frame. If such a class has already been created for this frame, the same class will be returned. The new class will always have component names for spherical coordinates of ``lon``/``lat``. Parameters ---------- framecls : `~astropy.coordinates.BaseCoordinateFrame` subclass The class to create the SkyOffsetFrame of. Returns ------- skyoffsetframecls : class The class for the new skyoffset frame. Notes ----- This function is necessary because Astropy's frame transformations depend on connection between specific frame *classes*. So each type of frame needs its own distinct skyoffset frame class. This function generates just that class, as well as ensuring that only one example of such a class actually gets created in any given python session.
def make_skyoffset_cls(framecls):
    """
    Create a new class that is the sky offset frame for a specific class of
    origin frame. If such a class has already been created for this frame, the
    same class will be returned.

    The new class will always have component names for spherical coordinates
    of ``lon``/``lat``.

    Parameters
    ----------
    framecls : `~astropy.coordinates.BaseCoordinateFrame` subclass
        The class to create the SkyOffsetFrame of.

    Returns
    -------
    skyoffsetframecls : class
        The class for the new skyoffset frame.

    Notes
    -----
    This function is necessary because Astropy's frame transformations depend
    on connection between specific frame *classes*. So each type of frame
    needs its own distinct skyoffset frame class. This function generates
    just that class, as well as ensuring that only one example of such a class
    actually gets created in any given python session.
    """
    # Create a new SkyOffsetFrame subclass for this frame class.
    name = "SkyOffset" + framecls.__name__
    _SkyOffsetFramecls = type(
        name,
        (SkyOffsetFrame, framecls),
        {
            "origin": CoordinateAttribute(
                frame=framecls, default=None, doc="The origin of the offset frame"
            ),
            # The following two have to be done because otherwise we use the
            # defaults of SkyOffsetFrame set by BaseCoordinateFrame.
            "_default_representation": framecls._default_representation,
            "_default_differential": framecls._default_differential,
            "__doc__": SkyOffsetFrame.__doc__,
        },
    )

    @frame_transform_graph.transform(
        FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls
    )
    def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
        """Transform between two skyoffset frames."""
        # This transform goes through the parent frames on each side.
        # from_frame -> from_frame.origin -> to_frame.origin -> to_frame
        tmp_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin)
        tmp_to = tmp_from.transform_to(to_skyoffset_frame.origin)
        return tmp_to.transform_to(to_skyoffset_frame)

    @frame_transform_graph.transform(
        DynamicMatrixTransform, framecls, _SkyOffsetFramecls
    )
    def reference_to_skyoffset(reference_frame, skyoffset_frame):
        """Convert a reference coordinate to a sky offset frame."""
        # Define rotation matrices along the position angle vector, and
        # relative to the origin.
        origin = skyoffset_frame.origin.spherical
        return (
            rotation_matrix(-skyoffset_frame.rotation, "x")
            @ rotation_matrix(-origin.lat, "y")
            @ rotation_matrix(origin.lon, "z")
        )

    @frame_transform_graph.transform(
        DynamicMatrixTransform, _SkyOffsetFramecls, framecls
    )
    def skyoffset_to_reference(skyoffset_coord, reference_frame):
        """Convert a sky offset frame coordinate to the reference frame."""
        # use the forward transform, but just invert it
        R = reference_to_skyoffset(reference_frame, skyoffset_coord)
        # transpose is the inverse because R is a rotation matrix
        return matrix_transpose(R)

    return _SkyOffsetFramecls
gets the two polar motion components in radians for use with apio.
def get_polar_motion(time): """ gets the two polar motion components in radians for use with apio. """ # Get the polar motion from the IERS table iers_table = iers.earth_orientation_table.get() xp, yp, status = iers_table.pm_xy(time, return_status=True) wmsg = ( "Tried to get polar motions for times {} IERS data is " "valid. Defaulting to polar motion from the 50-yr mean for those. " "This may affect precision at the arcsec level. Please check your " "astropy.utils.iers.conf.iers_auto_url and point it to a newer " "version if necessary." ) if np.any(status == iers.TIME_BEFORE_IERS_RANGE): xp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0] yp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1] warnings.warn(wmsg.format("before"), AstropyWarning) if np.any(status == iers.TIME_BEYOND_IERS_RANGE): xp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0] yp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1] warnings.warn(wmsg.format("after"), AstropyWarning) return xp.to_value(u.radian), yp.to_value(u.radian)
Generate a warning for an IERSRangeError.

Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
def _warn_iers(ierserr):
    """
    Generate a warning for an IERSRangeError.

    Parameters
    ----------
    ierserr : An `~astropy.utils.iers.IERSRangeError`
    """
    msg = "{0} Assuming UT1-UTC=0 for coordinate transformations."
    warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
Get UT1-UTC for use in coordinate transformations. Outside the IERS range this would normally raise an error, but for coordinate transformations we let the computation proceed with a warning and fall back to UT1-UTC = 0.
def get_dut1utc(time):
    """
    Get UT1-UTC for use in coordinate transformations. Outside the IERS range
    this would normally raise an error, but for coordinate transformations we
    let the computation proceed with a warning and fall back to UT1-UTC = 0.
    """
    try:
        return time.delta_ut1_utc
    except iers.IERSRangeError as e:
        _warn_iers(e)
        return np.zeros(time.shape)
Gets ``jd1`` and ``jd2`` from a time object in a particular scale. Parameters ---------- time : `~astropy.time.Time` The time to get the jds for scale : str The time scale to get the jds for Returns ------- jd1 : float jd2 : float
def get_jd12(time, scale): """ Gets ``jd1`` and ``jd2`` from a time object in a particular scale. Parameters ---------- time : `~astropy.time.Time` The time to get the jds for scale : str The time scale to get the jds for Returns ------- jd1 : float jd2 : float """ if time.scale == scale: newtime = time else: try: newtime = getattr(time, scale) except iers.IERSRangeError as e: _warn_iers(e) newtime = time return newtime.jd1, newtime.jd2
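Usage sketch (the date is illustrative): split an observation time into a two-part Julian date in the TT scale.

from astropy.time import Time

t = Time("2015-06-30 12:00:00", scale="utc")
jd1, jd2 = get_jd12(t, "tt")  # two-part Julian date in TT
print(jd1, jd2)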
Normalise a p-vector.
def norm(p): """ Normalise a p-vector. """ return p / np.sqrt(np.einsum("...i,...i", p, p))[..., np.newaxis]
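For instance (a sketch): normalising a 3-4-5 style position vector.

import numpy as np

p = np.array([3.0, 0.0, 4.0])
print(norm(p))  # [0.6, 0.0, 0.8]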
Combine p- and v- vectors into a pv-vector.
def pav2pv(p, v): """ Combine p- and v- vectors into a pv-vector. """ pv = np.empty(np.broadcast(p, v).shape[:-1], erfa.dt_pv) pv["p"] = p pv["v"] = v return pv
Find the X, Y coordinates of the CIP and the CIO locator, s. Parameters ---------- jd1 : float or `np.ndarray` First part of two part Julian date (TDB) jd2 : float or `np.ndarray` Second part of two part Julian date (TDB) Returns ------- x : float or `np.ndarray` x coordinate of the CIP y : float or `np.ndarray` y coordinate of the CIP s : float or `np.ndarray` CIO locator, s
def get_cip(jd1, jd2): """ Find the X, Y coordinates of the CIP and the CIO locator, s. Parameters ---------- jd1 : float or `np.ndarray` First part of two part Julian date (TDB) jd2 : float or `np.ndarray` Second part of two part Julian date (TDB) Returns ------- x : float or `np.ndarray` x coordinate of the CIP y : float or `np.ndarray` y coordinate of the CIP s : float or `np.ndarray` CIO locator, s """ # classical NPB matrix, IAU 2006/2000A rpnb = erfa.pnm06a(jd1, jd2) # CIP X, Y coordinates from array x, y = erfa.bpn2xy(rpnb) # CIO locator, s s = erfa.s06(jd1, jd2, x, y) return x, y, s
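Putting the helpers together (a sketch; the epoch is illustrative): get the CIP coordinates and CIO locator for a given time. TT is used here as a stand-in for TDB, since the two scales agree to better than about 2 ms.

from astropy.time import Time

t = Time("2020-01-01 00:00:00", scale="utc")
x, y, s = get_cip(*get_jd12(t, "tt"))
print(x, y, s)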