Merge the ``left`` and ``right`` metadata objects. This is a simplistic and limited implementation at this point.
def merge( left, right, merge_func=None, metadata_conflicts="warn", warn_str_func=_warn_str_func, error_str_func=_error_str_func, ): """ Merge the ``left`` and ``right`` metadata objects. This is a simplistic and limited implementation at this point. """ if not _both_isinstance(left, right, dict): raise MergeConflictError("Can only merge two dict-based objects") out = deepcopy(left) for key, val in right.items(): # If no conflict then insert val into out dict and continue if key not in out: out[key] = deepcopy(val) continue # There is a conflict that must be resolved if _both_isinstance(left[key], right[key], dict): out[key] = merge( left[key], right[key], merge_func, metadata_conflicts=metadata_conflicts ) else: try: if merge_func is None: for left_type, right_type, merge_cls in MERGE_STRATEGIES: if not merge_cls.enabled: continue if isinstance(left[key], left_type) and isinstance( right[key], right_type ): out[key] = merge_cls.merge(left[key], right[key]) break else: raise MergeConflictError else: out[key] = merge_func(left[key], right[key]) except MergeConflictError: # Pick the metadata item that is not None; if both are not # None and equal, there is no conflict; if they differ, there # is a conflict and we pick the one on the right (or warn or # raise, depending on metadata_conflicts). if left[key] is None: # This may not seem necessary since out[key] gets set to # right[key], but not all objects support != which is # needed for one of the if clauses. out[key] = right[key] elif right[key] is None: out[key] = left[key] elif _not_equal(left[key], right[key]): if metadata_conflicts == "warn": warnings.warn( warn_str_func(key, left[key], right[key]), MergeConflictWarning, ) elif metadata_conflicts == "error": raise MergeConflictError( error_str_func(key, left[key], right[key]) ) elif metadata_conflicts != "silent": raise ValueError( "metadata_conflicts argument must be one " 'of "silent", "warn", or "error"' ) out[key] = right[key] else: out[key] = right[key] return out
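A minimal usage sketch of ``merge`` (assuming it and ``MergeConflictWarning`` are importable from ``astropy.utils.metadata``, where this function appears to live): with the default ``metadata_conflicts='warn'``, a conflicting non-dict value warns and resolves to the right-hand value, while nested dicts are merged recursively.

from astropy.utils.metadata import merge, MergeConflictWarning
import warnings

left = {"a": 1, "nested": {"x": 1}}
right = {"b": 2, "nested": {"x": 2}}

with warnings.catch_warnings():
    # The conflicting "x" key emits a MergeConflictWarning by default.
    warnings.simplefilter("ignore", MergeConflictWarning)
    out = merge(left, right)

assert out == {"a": 1, "b": 2, "nested": {"x": 2}}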
Use numpy to find the common dtype for a list of ndarrays. Only allow arrays within the following fundamental numpy data types: ``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void`` Parameters ---------- arrs : list of ndarray Arrays for which to find the common dtype Returns ------- dtype_str : str String representation of dtype (dtype ``str`` attribute)
def common_dtype(arrs): """ Use numpy to find the common dtype for a list of ndarrays. Only allow arrays within the following fundamental numpy data types: ``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void`` Parameters ---------- arrs : list of ndarray Arrays for which to find the common dtype Returns ------- dtype_str : str String representation of dtype (dtype ``str`` attribute) """ np_types = (np.bool_, np.object_, np.number, np.character, np.void) uniq_types = { tuple(issubclass(dtype(arr).type, np_type) for np_type in np_types) for arr in arrs } if len(uniq_types) > 1: # Embed into the exception the actual list of incompatible types. incompat_types = [dtype(arr).name for arr in arrs] tme = MergeConflictError(f"Arrays have incompatible types {incompat_types}") tme._incompat_types = incompat_types raise tme arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs] # For string-type arrays we need to explicitly fill in non-zero # values or the final arr_common = .. step is unpredictable. for i, arr in enumerate(arrs): if arr.dtype.kind in ("S", "U"): arrs[i] = [ ("0" if arr.dtype.kind == "U" else b"0") * dtype_bytes_or_chars(arr.dtype) ] arr_common = np.array([arr[0] for arr in arrs]) return ( arr_common.dtype.str if arr_common.dtype.names is None else arr_common.dtype.descr )
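A quick illustration (assuming ``common_dtype`` is the helper in ``astropy.utils.metadata``; the exact dtype string depends on platform endianness):

import numpy as np
from astropy.utils.metadata import common_dtype

# int32 and float64 are both np.number, so a common dtype exists.
arrs = [np.array([1, 2], dtype=np.int32), np.array([1.5], dtype=np.float64)]
print(common_dtype(arrs))  # '<f8' on a little-endian platform

# Mixing fundamental kinds, e.g. a number array with a string array,
# raises MergeConflictError instead.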
Test the default_factory argument to MetaData.
def test_metadata_default_factory(): """Test the default_factory argument to MetaData.""" class ExampleData: meta = MetaData(default_factory=defaultdict) def __init__(self, meta=None): self.meta = meta data = ExampleData() assert isinstance(data.meta, defaultdict) assert len(data.meta) == 0
Regression test for issue #3294. Ensure that an exception is raised when a metadata conflict exists and ``metadata_conflicts='error'`` has been set.
def test_metadata_merging_conflict_exception(): """Regression test for issue #3294. Ensure that an exception is raised when a metadata conflict exists and ``metadata_conflicts='error'`` has been set. """ data1 = ExampleData() data2 = ExampleData() data1.meta["somekey"] = {"x": 1, "y": 1} data2.meta["somekey"] = {"x": 1, "y": 999} with pytest.raises(MergeConflictError): merge(data1.meta, data2.meta, metadata_conflicts="error")
Tests that a function made with ``make_function_with_signature`` is given the correct line number into the module it was created from (i.e. the line ``make_function_with_signature`` was called from).
def test_make_function_with_signature_lineno(): """ Tests that a function made with ``make_function_with_signature`` is given the correct line number into the module it was created from (i.e. the line ``make_function_with_signature`` was called from). """ def crashy_function(*args, **kwargs): 1 / 0 # Make a wrapper around this function with the signature: # crashy_function(a, b) # Note: the signature is not really relevant to this test wrapped = make_function_with_signature(crashy_function, ("a", "b")) line = """ wrapped = make_function_with_signature(crashy_function, ('a', 'b')) """.strip() try: wrapped(1, 2) except Exception: exc_cls, exc, tb = sys.exc_info() assert exc_cls is ZeroDivisionError # The *last* line in the traceback should be the 1 / 0 line in # crashy_function; the next line up should be the line that the # make_function_with_signature call was on tb_lines = traceback.format_tb(tb) assert "1 / 0" in tb_lines[-1] else: pytest.fail("This should have caused an exception")
Regression test for #1760 Ensures that the spinner can go into fallback mode when using the unicode spinner on a terminal whose default encoding cannot encode the unicode characters.
def test_spinner_non_unicode_console(): """Regression test for #1760 Ensures that the spinner can go into fallback mode when using the unicode spinner on a terminal whose default encoding cannot encode the unicode characters. """ stream = FakeTTY("ascii") chars = console.Spinner._default_unicode_chars with console.Spinner("Reticulating splines", file=stream, chars=chars) as s: next(s)
Hammer download_file with multiple threaded requests. The goal is to stress-test the locking system. Normal parallel downloading also does this but coverage tools lose track of which paths are explored.
def test_download_file_threaded_many(temp_cache, valid_urls): """Hammer download_file with multiple threaded requests. The goal is to stress-test the locking system. Normal parallel downloading also does this but coverage tools lose track of which paths are explored. """ urls = list(islice(valid_urls, N_THREAD_HAMMER)) with ThreadPoolExecutor(max_workers=len(urls)) as P: r = list(P.map(lambda u: download_file(u, cache=True), [u for (u, c) in urls])) check_download_cache() assert len(r) == len(urls) for r_, (u, c) in zip(r, urls): assert get_file_contents(r_) == c
Demonstrate urllib's segfault.
def test_threaded_segfault(valid_urls): """Demonstrate urllib's segfault.""" def slurp_url(u): with urllib.request.urlopen(u) as remote: block = True while block: block = remote.read(1024) urls = list(islice(valid_urls, N_THREAD_HAMMER)) with ThreadPoolExecutor(max_workers=len(urls)) as P: list(P.map(lambda u: slurp_url(u), [u for (u, c) in urls]))
Hammer download_file with multiple threaded requests. Because some of these requests fail, the locking context manager is exercised with exceptions as well as success returns. I do not expect many surprises from the threaded version, but the process version gave trouble here.
def test_download_file_threaded_many_partial_success( temp_cache, valid_urls, invalid_urls ): """Hammer download_file with multiple threaded requests. Because some of these requests fail, the locking context manager is exercised with exceptions as well as success returns. I do not expect many surprises from the threaded version, but the process version gave trouble here. """ urls = [] contents = {} for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER): urls.append(u) contents[u] = c urls.append(i) def get(u): try: return download_file(u, cache=True) except OSError: return None with ThreadPoolExecutor(max_workers=len(urls)) as P: r = list(P.map(get, urls)) check_download_cache() assert len(r) == len(urls) for r_, u in zip(r, urls): if u in contents: assert get_file_contents(r_) == contents[u] else: assert r_ is None
Check that files with the same hash don't confuse the storage.
def test_clear_download_multiple_references_doesnt_corrupt_storage( temp_cache, tmp_path ): """Check that files with the same hash don't confuse the storage.""" content = "Test data; doesn't matter much.\n" def make_url(): with NamedTemporaryFile("w", dir=tmp_path, delete=False) as f: f.write(content) url = url_to(f.name) clear_download_cache(url) filename = download_file(url, cache=True) return url, filename a_url, a_filename = make_url() clear_download_cache(a_filename) assert not is_url_in_cache(a_url) f_url, f_filename = make_url() g_url, g_filename = make_url() assert f_url != g_url assert is_url_in_cache(f_url) assert is_url_in_cache(g_url) clear_download_cache(f_url) assert not is_url_in_cache(f_url) assert is_url_in_cache(g_url) assert os.path.exists( g_filename ), "Contents should not be deleted while a reference exists" clear_download_cache(g_url) assert not os.path.exists( g_filename ), "No reference exists any more, file should be deleted"
Confirm that downloading a local file does not delete it. When this was implemented with urlretrieve (rather than urlopen), local files were not copied to create temporaries, so importing them into the cache deleted the original from wherever it was in the filesystem. I lost some built-in astropy data.
def test_download_file_local_cache_survives(tmp_path, temp_cache, use_cache): """Confirm that downloading a local file does not delete it. When this was implemented with urlretrieve (rather than urlopen), local files were not copied to create temporaries, so importing them into the cache deleted the original from wherever it was in the filesystem. I lost some built-in astropy data. """ fn = tmp_path / "file" contents = "some text" with open(fn, "w") as f: f.write(contents) u = url_to(fn) f = download_file(u, cache=use_cache) assert fn not in _tempfilestodel, "File should not be deleted!" assert os.path.isfile(fn), "File should not be deleted!" assert get_file_contents(f) == contents
Tests for https://github.com/astropy/astropy/pull/10434
def test_download_certificate_verification_failed(): """Tests for https://github.com/astropy/astropy/pull/10434""" # First test the expected exception when download fails due to a # certificate verification error; we simulate this by passing a bogus # CA directory to the ssl_context argument ssl_context = {"cafile": None, "capath": "/does/not/exist"} msg = f"Verification of TLS/SSL certificate at {TESTURL_SSL} failed" with pytest.raises(urllib.error.URLError, match=msg): download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context) with pytest.warns(AstropyWarning, match=msg) as warning_lines: fnout = download_file( TESTURL_SSL, cache=False, ssl_context=ssl_context, allow_insecure=True ) assert len(warning_lines) == 1 assert os.path.isfile(fnout)
Check that a partially successful download works. Even in the presence of many requested URLs, presumably hitting all the parallelism this system can manage, a download failure leads to a tidy shutdown.
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls): """Check that a partially successful download works. Even in the presence of many requested URLs, presumably hitting all the parallelism this system can manage, a download failure leads to a tidy shutdown. """ td = list(islice(valid_urls, N_PARALLEL_HAMMER)) u_bad = next(invalid_urls) with pytest.raises(urllib.request.URLError): download_files_in_parallel([u_bad] + [u for (u, c) in td])
Check that a partially successful parallel download leaves the cache unlocked. This needs to be repeated many times because race conditions are what cause this sort of thing, especially situations where a process might be forcibly shut down while it holds the lock.
def test_download_parallel_partial_success_lock_safe( temp_cache, valid_urls, invalid_urls ): """Check that a partially successful parallel download leaves the cache unlocked. This needs to be repeated many times because race conditions are what cause this sort of thing, especially situations where a process might be forcibly shut down while it holds the lock. """ s = random.getstate() try: random.seed(0) for _ in range(N_PARALLEL_HAMMER): td = list(islice(valid_urls, FEW)) u_bad = next(invalid_urls) urls = [u_bad] + [u for (u, c) in td] random.shuffle(urls) with pytest.raises(urllib.request.URLError): download_files_in_parallel(urls) finally: random.setstate(s)
Test that a file path to a nonexistent data file can still be constructed.
def test_get_invalid(package): """Test that a file path to a nonexistent data file can still be constructed.""" path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package) assert not os.path.isfile(path) assert not os.path.isdir(path)
Regression test for issue #1256 Tests that `get_pkg_data_filename` works in a third-party package that doesn't make any relative imports from the module it's used from. Uses a test package under ``data/test_package``.
def test_data_name_third_party_package(): """Regression test for issue #1256 Tests that `get_pkg_data_filename` works in a third-party package that doesn't make any relative imports from the module it's used from. Uses a test package under ``data/test_package``. """ # Get the actual data dir: data_dir = os.path.join(os.path.dirname(__file__), "data") sys.path.insert(0, data_dir) try: import test_package filename = test_package.get_data_filename() assert os.path.normcase(filename) == ( os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt")) ) finally: sys.path.pop(0)
Tests to make sure the default behavior when the cache directory can't be located is correct
def test_data_noastropy_fallback(monkeypatch): """ Tests to make sure the default behavior when the cache directory can't be located is correct """ # better yet, set the configuration to make sure the temp files are deleted conf.delete_temporary_downloads_at_exit = True # make sure the config and cache directories are not searched monkeypatch.setenv("XDG_CONFIG_HOME", "foo") monkeypatch.delenv("XDG_CONFIG_HOME") monkeypatch.setenv("XDG_CACHE_HOME", "bar") monkeypatch.delenv("XDG_CACHE_HOME") monkeypatch.setattr(paths.set_temp_config, "_temp_path", None) monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None) # make sure the _find_or_create_astropy_dir function fails as though the # astropy dir could not be accessed def osraiser(dirnm, linkto, pkgname=None): raise OSError() monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser) with pytest.raises(OSError): # make sure the config dir search fails paths.get_cache_dir(rootname="astropy") with pytest.warns(CacheMissingWarning) as warning_lines: fnout = download_file(TESTURL, cache=True) n_warns = len(warning_lines) partial_warn_msgs = ["remote data cache could not be accessed", "temporary file"] if n_warns == 4: partial_warn_msgs.extend(["socket", "socket"]) for wl in warning_lines: cur_w = str(wl).lower() for i, partial_msg in enumerate(partial_warn_msgs): if partial_msg in cur_w: del partial_warn_msgs[i] break assert ( len(partial_warn_msgs) == 0 ), f"Got some unexpected warnings: {partial_warn_msgs}" assert n_warns in (2, 4), f"Expected 2 or 4 warnings, got {n_warns}" assert os.path.isfile(fnout) # clearing the cache should be a no-op that doesn't affect fnout with pytest.warns(CacheMissingWarning) as record: clear_download_cache(TESTURL) assert len(record) == 2 assert ( record[0].message.args[0] == "Remote data cache could not be accessed due to OSError" ) assert "Not clearing data cache - cache inaccessible" in record[1].message.args[0] assert os.path.isfile(fnout) # now remove it so tests don't clutter up the temp dir; this should get # called at exit, anyway, but we do it here just to make sure it's working # correctly _deltemps() assert not os.path.isfile(fnout) # now try with no cache fnnocache = download_file(TESTURL, cache=False) with open(fnnocache, "rb") as page: assert page.read().decode("utf-8").find("Astropy") > -1
checks that download_file gives a URLError and not an AttributeError, as its code pathway involves some fiddling with the exception.
def test_invalid_location_download_raises_urlerror(): """ checks that download_file gives a URLError and not an AttributeError, as its code pathway involves some fiddling with the exception. """ with pytest.raises(urllib.error.URLError): download_file("http://www.astropy.org/nonexistentfile")
checks that download_file gives an OSError if the socket is blocked
def test_invalid_location_download_noconnect(): """ checks that download_file gives an OSError if the socket is blocked """ # This should invoke socket's monkeypatched failure with pytest.raises(OSError): download_file("http://astropy.org/nonexistentfile")
Make sure we get a URLError rather than OSError even if it's a local directory.
def test_download_file_local_directory(tmp_path): """Make sure we get a URLError rather than OSError even if it's a local directory.""" with pytest.raises(urllib.request.URLError): download_file(url_to(tmp_path))
Ensure that bogus cache settings are handled sensibly. Because the user can specify the cache location in a config file, and because they might try to deduce the location by looking around at what's in their directory tree, and because the cache directory is actually several tree levels down from the directory set in the config file, it's important to check what happens if each of the steps in the path is wrong somehow.
def test_cache_dir_is_actually_a_file(tmp_path, valid_urls): """Ensure that bogus cache settings are handled sensibly. Because the user can specify the cache location in a config file, and because they might try to deduce the location by looking around at what's in their directory tree, and because the cache directory is actually several tree levels down from the directory set in the config file, it's important to check what happens if each of the steps in the path is wrong somehow. """ def check_quietly_ignores_bogus_cache(): """We want a broken cache to produce a warning but then astropy should act like there isn't a cache. """ with pytest.warns(CacheMissingWarning): assert not get_cached_urls() with pytest.warns(CacheMissingWarning): assert not is_url_in_cache("http://www.example.com/") with pytest.warns(CacheMissingWarning): assert not cache_contents() with pytest.warns(CacheMissingWarning): u, c = next(valid_urls) r = download_file(u, cache=True) assert get_file_contents(r) == c # check the filename r appears in a warning message? # check r is added to the delete_at_exit list? # in fact should there be testing of the delete_at_exit mechanism, # as far as that is possible? with pytest.warns(CacheMissingWarning): assert not is_url_in_cache(u) with pytest.warns(CacheMissingWarning): with pytest.raises(OSError): check_download_cache() dldir = _get_download_cache_loc() # set_temp_cache acts weird if it is pointed at a file (see below) # but we want to see what happens when the cache is pointed # at a file instead of a directory, so make a directory we can # replace later. fn = tmp_path / "file" ct = "contents\n" os.mkdir(fn) with paths.set_temp_cache(fn): shutil.rmtree(fn) with open(fn, "w") as f: f.write(ct) with pytest.raises(OSError): paths.get_cache_dir() check_quietly_ignores_bogus_cache() assert dldir == _get_download_cache_loc() assert get_file_contents(fn) == ct, "File should not be harmed." # See what happens when set_temp_cache is pointed at a file with pytest.raises(OSError): with paths.set_temp_cache(fn): pass assert dldir == _get_download_cache_loc() assert get_file_contents(str(fn)) == ct # Now the cache directory is normal but the subdirectory it wants # to make is a file cd = tmp_path / "astropy" with open(cd, "w") as f: f.write(ct) with paths.set_temp_cache(tmp_path): check_quietly_ignores_bogus_cache() assert dldir == _get_download_cache_loc() assert get_file_contents(cd) == ct os.remove(cd) # Ditto one level deeper os.makedirs(cd) cd = tmp_path / "astropy" / "download" with open(cd, "w") as f: f.write(ct) with paths.set_temp_cache(tmp_path): check_quietly_ignores_bogus_cache() assert dldir == _get_download_cache_loc() assert get_file_contents(cd) == ct os.remove(cd) # Ditto another level deeper os.makedirs(cd) cd = tmp_path / "astropy" / "download" / "url" with open(cd, "w") as f: f.write(ct) with paths.set_temp_cache(tmp_path): check_quietly_ignores_bogus_cache() assert dldir == _get_download_cache_loc() assert get_file_contents(cd) == ct os.remove(cd)
checks that get_readable_fileobj leaves no temporary files behind
def test_get_readable_fileobj_cleans_up_temporary_files(tmp_path, monkeypatch): """checks that get_readable_fileobj leaves no temporary files behind""" # Create a 'file://' URL pointing to a path on the local filesystem url = url_to(TESTLOCAL) # Save temporary files to a known location monkeypatch.setattr(tempfile, "tempdir", str(tmp_path)) # Call get_readable_fileobj() as a context manager with get_readable_fileobj(url) as f: f.read() # Get listing of files in temporary directory tempdir_listing = list(tmp_path.iterdir()) # Assert that no temporary files were left behind after the # get_readable_fileobj() context manager finished running assert len(tempdir_listing) == 0
Ensure fileobj state is as expected when get_readable_fileobj() is called inside another get_readable_fileobj().
def test_nested_get_readable_fileobj(): """Ensure fileobj state is as expected when get_readable_fileobj() is called inside another get_readable_fileobj(). """ with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj: with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2: fileobj2.seek(1) fileobj.seek(1) # Theoretically, fileobj2 should be closed already here but it is not. # See https://github.com/astropy/astropy/pull/8675. # UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN. # assert fileobj2.closed assert fileobj.closed and fileobj2.closed
Test that download automatically enables TLS/SSL when required
def test_ftp_tls_auto(temp_cache): """Test that download automatically enables TLS/SSL when required""" url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.daily" download_file(url)
Test that dtype_info_name is giving the expected output Here the available types:: 'b' boolean 'i' (signed) integer 'u' unsigned integer 'f' floating-point 'c' complex-floating point 'O' (Python) objects 'S', 'a' (byte-)string 'U' Unicode 'V' raw data (void)
def test_dtype_info_name(input, output): """ Test that dtype_info_name is giving the expected output Here the available types:: 'b' boolean 'i' (signed) integer 'u' unsigned integer 'f' floating-point 'c' complex-floating point 'O' (Python) objects 'S', 'a' (byte-)string 'U' Unicode 'V' raw data (void) """ assert dtype_info_name(input) == output
Test that getting a single item from Table column object does not copy info. See #10889.
def test_info_no_copy_numpy(): """Test that getting a single item from Table column object does not copy info. See #10889. """ col = [1, 2] t = QTable([col], names=["col"]) t.add_index("col") val = t["col"][0] # Returns a numpy scalar (e.g. np.float64) with no .info assert isinstance(val, np.number) with pytest.raises(AttributeError): val.info val = t["col"][:] assert val.info.indices == []
Test that getting a single item from Table column object does not copy info. See #10889.
def test_info_no_copy_mixin_with_index(col): """Test that getting a single item from Table column object does not copy info. See #10889. """ t = QTable([col], names=["col"]) t.add_index("col") val = t["col"][0] assert "info" not in val.__dict__ assert val.info.indices == [] val = t["col"][:] assert "info" in val.__dict__ assert val.info.indices == [] val = t[:]["col"] assert "info" in val.__dict__ assert isinstance(val.info.indices[0], SlicedIndex)
Test that getting a single item from Table SkyCoord column object does not copy info. Cannot create an index on a SkyCoord currently.
def test_info_no_copy_skycoord(): """Test that getting a single item from Table SkyCoord column object does not copy info. Cannot create an index on a SkyCoord currently. """ col = (SkyCoord([1, 2], [1, 2], unit="deg"),) t = QTable([col], names=["col"]) val = t["col"][0] assert "info" not in val.__dict__ assert val.info.indices == [] val = t["col"][:] assert val.info.indices == [] val = t[:]["col"] assert val.info.indices == []
Test that a class with a ``__new__`` method still works even if it accepts additional arguments. This previously failed because the deprecated decorator would wrap ``object``'s ``__init__``, which takes no arguments.
def test_deprecated_class_with_new_method(): """ Test that a class with a ``__new__`` method still works even if it accepts additional arguments. This previously failed because the deprecated decorator would wrap ``object``'s ``__init__``, which takes no arguments. """ @deprecated("1.0") class A: def __new__(cls, a): return super().__new__(cls) # Creating an instance should work but raise a DeprecationWarning with pytest.warns(AstropyDeprecationWarning) as w: A(1) assert len(w) == 1 @deprecated("1.0") class B: def __new__(cls, a): return super().__new__(cls) def __init__(self, a): pass # Creating an instance should work but raise a DeprecationWarning with pytest.warns(AstropyDeprecationWarning) as w: B(1) assert len(w) == 1
Regression test for an issue where classes that used ``super()`` in their ``__init__`` did not actually call the correct class's ``__init__`` in the MRO.
def test_deprecated_class_with_super(): """ Regression test for an issue where classes that used ``super()`` in their ``__init__`` did not actually call the correct class's ``__init__`` in the MRO. """ @deprecated("100.0") class TB: def __init__(self, a, b): super().__init__() with pytest.warns(AstropyDeprecationWarning) as w: TB(1, 2) assert len(w) == 1 if TB.__doc__ is not None: assert "function" not in TB.__doc__ assert "deprecated" in TB.__doc__ assert "function" not in TB.__init__.__doc__ assert "deprecated" in TB.__init__.__doc__
Regression test for an issue where deprecating a class with a metaclass other than type did not restore the metaclass properly.
def test_deprecated_class_with_custom_metaclass(): """ Regression test for an issue where deprecating a class with a metaclass other than type did not restore the metaclass properly. """ with pytest.warns(AstropyDeprecationWarning) as w: TB() assert len(w) == 1 assert type(TB) is TMeta assert TB.metaclass_attr == 1
Regression test for issue introduced by https://github.com/astropy/astropy/pull/2811 and mentioned also here: https://github.com/astropy/astropy/pull/2580#issuecomment-51049969 where it appears that deprecated staticmethods didn't work on Python 2.6.
def test_deprecated_static_and_classmethod(): """ Regression test for issue introduced by https://github.com/astropy/astropy/pull/2811 and mentioned also here: https://github.com/astropy/astropy/pull/2580#issuecomment-51049969 where it appears that deprecated staticmethods didn't work on Python 2.6. """ class A: """Docstring""" @deprecated("1.0") @staticmethod def B(): pass @deprecated("1.0") @classmethod def C(cls): pass with pytest.warns(AstropyDeprecationWarning) as w: A.B() assert len(w) == 1 if A.__doc__ is not None: assert "deprecated" in A.B.__doc__ # Test that the ``__deprecated__`` attribute is set # See https://peps.python.org/pep-0702/ for more information assert ( A.B.__deprecated__ == "The B method is deprecated and may be removed in a future version." ) # And that it is not set on the original function which doesn't have # the deprecation warning. assert not hasattr(A.B.__wrapped__, "__deprecated__") with pytest.warns(AstropyDeprecationWarning) as w: A.C() assert len(w) == 1 if A.__doc__ is not None: assert "deprecated" in A.C.__doc__ # Test that the ``__deprecated__`` attribute is set # See https://peps.python.org/pep-0702/ for more information assert ( A.C.__deprecated__ == "The C method is deprecated and may be removed in a future version." ) # And that it is not set on the original function which doesn't have # the deprecation warning. assert not hasattr(A.C.__wrapped__, "__deprecated__")
Regression test for an issue where sharedmethod would bind to one class for all time, causing the same method not to work properly on other subclasses of that class. It has the same problem when the same sharedmethod is called on different instances of some class as well.
def test_sharedmethod_reuse_on_subclasses(): """ Regression test for an issue where sharedmethod would bind to one class for all time, causing the same method not to work properly on other subclasses of that class. It has the same problem when the same sharedmethod is called on different instances of some class as well. """ class AMeta(type): def foo(cls): return cls.x class A: x = 3 def __init__(self, x): self.x = x @sharedmethod def foo(self): return self.x a1 = A(1) a2 = A(2) assert a1.foo() == 1 assert a2.foo() == 2 # Similar test now, but for multiple subclasses using the same sharedmethod # as a classmethod assert A.foo() == 3 class B(A): x = 5 assert B.foo() == 5
Tests that the docstring is set correctly on classproperties. This failed previously due to a bug in Python that didn't always set __doc__ properly on instances of property subclasses.
def test_classproperty_docstring(): """ Tests that the docstring is set correctly on classproperties. This failed previously due to a bug in Python that didn't always set __doc__ properly on instances of property subclasses. """ class A: # Inherits docstring from getter @classproperty def foo(cls): """The foo.""" return 1 assert A.__dict__["foo"].__doc__ == "The foo." class B: # Use doc passed to classproperty constructor def _get_foo(cls): return 1 foo = classproperty(_get_foo, doc="The foo.") assert B.__dict__["foo"].__doc__ == "The foo."
Test that a class property with lazy=True is thread-safe.
def test_classproperty_lazy_threadsafe(fast_thread_switching): """ Test that a class property with lazy=True is thread-safe. """ workers = 8 with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor: # This is testing for race conditions, so try many times in the # hope that we'll get the timing right. for p in range(10000): class A: @classproperty(lazy=True) def foo(cls): nonlocal calls calls += 1 return object() # Have all worker threads query in parallel calls = 0 futures = [executor.submit(lambda: A.foo) for i in range(workers)] # Check that only one call happened and they all received it values = [future.result() for future in futures] assert calls == 1 assert values[0] is not None assert values == [values[0]] * workers
Test thread safety of lazyproperty.
def test_lazyproperty_threadsafe(fast_thread_switching): """ Test thread safety of lazyproperty. """ # This test is generally similar to test_classproperty_lazy_threadsafe # above. See there for comments. class A: def __init__(self): self.calls = 0 @lazyproperty def foo(self): self.calls += 1 return object() workers = 8 with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor: for p in range(10000): a = A() futures = [executor.submit(lambda: a.foo) for i in range(workers)] values = [future.result() for future in futures] assert a.calls == 1 assert a.foo is not None assert values == [a.foo] * workers
Regression test for https://github.com/spacetelescope/PyFITS/issues/21
def test_float_comparison(): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/21 """ f = io.StringIO() a = np.float32(0.029751372) b = np.float32(0.029751368) identical = report_diff_values(a, b, fileobj=f) assert not identical out = f.getvalue() # This test doesn't care about what the exact output is, just that it # did show a difference in their text representations assert "a>" in out assert "b>" in out
Regression test for https://github.com/astropy/astropy/issues/4122
def test_diff_types(): """ Regression test for https://github.com/astropy/astropy/issues/4122 """ f = io.StringIO() a = 1.0 b = "1.0" identical = report_diff_values(a, b, fileobj=f) assert not identical assert f.getvalue().splitlines() == [ " (float) a> 1.0", " (str) b> '1.0'", " ? + +", ]
Test comparison of different numeric scalar types.
def test_diff_numeric_scalar_types(): """Test comparison of different numeric scalar types.""" f = io.StringIO() assert not report_diff_values(1.0, 1, fileobj=f) out = f.getvalue() assert out == " (float) a> 1.0\n (int) b> 1\n"
Test diff-ing two arrays.
def test_array_comparison(): """ Test diff-ing two arrays. """ f = io.StringIO() a = np.arange(9).reshape(3, 3) b = a + 1 identical = report_diff_values(a, b, fileobj=f) assert not identical out = f.getvalue() assert ( out == " at [0, 0]:\n" " a> 0\n" " b> 1\n" " at [0, 1]:\n" " a> 1\n" " b> 2\n" " at [0, 2]:\n" " a> 2\n" " b> 3\n" " ...and at 6 more indices.\n" )
Test diff-ing two differently shaped arrays.
def test_diff_shaped_array_comparison(): """ Test diff-ing two differently shaped arrays. """ f = io.StringIO() a = np.empty((1, 2, 3)) identical = report_diff_values(a, a[0], fileobj=f) assert not identical out = f.getvalue() assert ( out == " Different array shapes:\n a> (1, 2, 3)\n ? ---\n b> (2, 3)\n" )
Test diff-ing two simple Table objects.
def test_tablediff(): """ Test diff-ing two simple Table objects. """ a = Table.read( """name obs_date mag_b mag_v M31 2012-01-02 17.0 16.0 M82 2012-10-29 16.2 15.2 M101 2012-10-31 15.1 15.5""", format="ascii", ) b = Table.read( """name obs_date mag_b mag_v M31 2012-01-02 17.0 16.5 M82 2012-10-29 16.2 15.2 M101 2012-10-30 15.1 15.5 NEW 2018-05-08 nan 9.0""", format="ascii", ) f = io.StringIO() identical = report_diff_values(a, b, fileobj=f) assert not identical out = f.getvalue() assert ( out == " name obs_date mag_b mag_v\n" " ---- ---------- ----- -----\n" " a> M31 2012-01-02 17.0 16.0\n" " ? ^\n" " b> M31 2012-01-02 17.0 16.5\n" " ? ^\n" " M82 2012-10-29 16.2 15.2\n" " a> M101 2012-10-31 15.1 15.5\n" " ? ^\n" " b> M101 2012-10-30 15.1 15.5\n" " ? ^\n" " b> NEW 2018-05-08 nan 9.0\n" ) # Identical assert report_diff_values(a, a, fileobj=f)
Tests that the `find_current_module` function works. Note that this also implicitly tests compat.misc._patched_getmodule
def test_pkg_finder(): """ Tests that the `find_current_module` function works. Note that this also implicitly tests compat.misc._patched_getmodule """ mod1 = "astropy.utils.introspection" mod2 = "astropy.utils.tests.test_introspection" mod3 = "astropy.utils.tests.test_introspection" assert find_current_module(0).__name__ == mod1 assert find_current_module(1).__name__ == mod2 assert find_current_module(0, True).__name__ == mod3
Tests that the `find_current_module` function would work if used inside an application bundle. Since we can't test this directly, we test what would happen if inspect.getmodule returned `None`, which is what happens inside PyInstaller and py2app bundles.
def test_find_current_module_bundle(): """ Tests that the `find_current_module` function would work if used inside an application bundle. Since we can't test this directly, we test what would happen if inspect.getmodule returned `None`, which is what happens inside PyInstaller and py2app bundles. """ with mock.patch("inspect.getmodule", return_value=None): mod1 = "astropy.utils.introspection" mod2 = "astropy.utils.tests.test_introspection" mod3 = "astropy.utils.tests.test_introspection" assert find_current_module(0).__name__ == mod1 assert find_current_module(1).__name__ == mod2 assert find_current_module(0, True).__name__ == mod3
Docstring that's here just to check for -OO.
def _docstring_canary(): """Docstring that's here just to check for -OO."""
An identity function that jitters its execution time by a pseudo-random amount. FIXME: This function should be defined in test_console.py, but Astropy's test runner interacts strangely with Python's `multiprocessing` module. I was getting a mysterious PicklingError until I moved this function into a separate module. (It worked fine in a standalone pytest script.)
def func(i): """An identity function that jitters its execution time by a pseudo-random amount. FIXME: This function should be defined in test_console.py, but Astropy's test runner interacts strangely with Python's `multiprocessing` module. I was getting a mysterious PicklingError until I moved this function into a separate module. (It worked fine in a standalone pytest script.)""" with NumpyRNGContext(i): time.sleep(np.random.uniform(0, 0.01)) return i
Tests a ScienceState and spawned contexts.
def test_ScienceState_and_Context(): """ Tests a ScienceState and spawned contexts. """ class MyState(ScienceState): _value = "A" _state = dict(foo="bar") state = {"foo": "bar"} # test created ScienceState assert MyState.get() == "A" assert MyState.validate("B") == "B" assert MyState._state == state # test setting with MyState.set("B"): assert MyState.get() == "B" assert MyState.get() == "A"
Returns `True` if *ID* is a valid XML ID.
def check_id(ID): """ Returns `True` if *ID* is a valid XML ID. """ return re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID) is not None
Given an arbitrary string, create one that can be used as an xml id. This is rather simplistic at the moment, since it just replaces non-valid characters with underscores.
def fix_id(ID): """ Given an arbitrary string, create one that can be used as an xml id. This is rather simplistic at the moment, since it just replaces non-valid characters with underscores. """ if re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID): return ID if len(ID): corrected = ID if not len(corrected) or re.match("^[^A-Za-z_]$", corrected[0]): corrected = "_" + corrected corrected = re.sub(r"[^A-Za-z_]", "_", corrected[0]) + re.sub( r"[^A-Za-z0-9_\.\-]", "_", corrected[1:] ) return corrected return ""
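Illustrative behavior of the two helpers above (a sketch, assuming these are the functions from ``astropy.utils.xml.check``):

assert check_id("region_1.0")            # starts with a letter, valid chars only
assert not check_id("1region")           # an XML ID may not start with a digit
assert fix_id("1region") == "_1region"   # a leading underscore is prepended
assert fix_id("") == ""                  # empty input stays empty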
Returns `True` if *token* is a valid XML token, as defined by XML Schema Part 2.
def check_token(token): """ Returns `True` if *token* is a valid XML token, as defined by XML Schema Part 2. """ return ( token == "" or re.match(r"[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token) is not None )
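For example (a sketch; single internal spaces are allowed, but other whitespace is not):

assert check_token("hello world")      # internal single spaces are fine
assert not check_token("two\nlines")   # newlines are not allowed in a token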
Returns `True` if *content_type* is a valid MIME content type (syntactically at least), as defined by RFC 2045.
def check_mime_content_type(content_type): """ Returns `True` if *content_type* is a valid MIME content type (syntactically at least), as defined by RFC 2045. """ ctrls = "".join(chr(x) for x in range(0x20)) token_regex = f'[^()<>@,;:\\"/[\\]?= {ctrls}\x7f]+' return ( re.match(rf"(?P<type>{token_regex})/(?P<subtype>{token_regex})$", content_type) is not None )
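For example (a sketch of the syntactic check only; it does not verify that the type is registered with IANA):

assert check_mime_content_type("image/jpeg")
assert check_mime_content_type("application/x-votable+xml")
assert not check_mime_content_type("image jpeg")  # missing the '/' separator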
Returns `True` if *uri* is a valid URI as defined in RFC 2396.
def check_anyuri(uri): """ Returns `True` if *uri* is a valid URI as defined in RFC 2396. """ if ( re.match( ( r"(([a-zA-Z][0-9a-zA-Z+\-\.]*:)?/{0,2}[0-9a-zA-Z;" r"/?:@&=+$\.\-_!~*'()%]+)?(#[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?" ), uri, ) is None ): return False try: urllib.parse.urlparse(uri) except Exception: return False return True
Returns a function suitable for streaming input, or a file object. This function is only useful if passing off to C code where: - If it's a real file object, we want to use it as a real C file object to avoid the Python overhead. - If it's not a real file object, it's much handier to just have a Python function to call. This is somewhat quirky behavior, of course, which is why it is private. For a more useful version of similar behavior, see `astropy.utils.misc.get_readable_fileobj`. Parameters ---------- fd : object May be: - a file object. If the file is uncompressed, this raw file object is returned verbatim. Otherwise, the read method is returned. - a function that reads from a stream, in which case it is returned verbatim. - a file path, in which case it is opened. Again, like a file object, if it's uncompressed, a raw file object is returned, otherwise its read method. - an object with a :meth:`read` method, in which case that method is returned. Returns ------- fd : context-dependent See above.
def _convert_to_fd_or_read_function(fd): """ Returns a function suitable for streaming input, or a file object. This function is only useful if passing off to C code where: - If it's a real file object, we want to use it as a real C file object to avoid the Python overhead. - If it's not a real file object, it's much handier to just have a Python function to call. This is somewhat quirky behavior, of course, which is why it is private. For a more useful version of similar behavior, see `astropy.utils.misc.get_readable_fileobj`. Parameters ---------- fd : object May be: - a file object. If the file is uncompressed, this raw file object is returned verbatim. Otherwise, the read method is returned. - a function that reads from a stream, in which case it is returned verbatim. - a file path, in which case it is opened. Again, like a file object, if it's uncompressed, a raw file object is returned, otherwise its read method. - an object with a :meth:`read` method, in which case that method is returned. Returns ------- fd : context-dependent See above. """ if callable(fd): yield fd return with data.get_readable_fileobj(fd, encoding="binary") as new_fd: if sys.platform.startswith("win"): yield new_fd.read else: if isinstance(new_fd, io.FileIO): yield new_fd else: yield new_fd.read
Returns an iterator over the elements of an XML file. The iterator doesn't ever build a tree, so it is much more memory and time efficient than the alternative in ``cElementTree``. Parameters ---------- source : path-like, :term:`file-like (readable)`, or callable Handle that contains the data or function that reads it. If a function or callable object, it must directly read from a stream. Non-callable objects must define a ``read`` method. Returns ------- parts : iterator The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*): - *start*: when `True` is a start element event, otherwise an end element event. - *tag*: The name of the element - *data*: Depends on the value of *event*: - if *start* == `True`, data is a dictionary of attributes - if *start* == `False`, data is a string containing the text content of the element - *pos*: Tuple (*line*, *col*) indicating the source of the event.
def get_xml_iterator(source, _debug_python_based_parser=False): """ Returns an iterator over the elements of an XML file. The iterator doesn't ever build a tree, so it is much more memory and time efficient than the alternative in ``cElementTree``. Parameters ---------- source : path-like, :term:`file-like (readable)`, or callable Handle that contains the data or function that reads it. If a function or callable object, it must directly read from a stream. Non-callable objects must define a ``read`` method. Returns ------- parts : iterator The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*): - *start*: when `True` is a start element event, otherwise an end element event. - *tag*: The name of the element - *data*: Depends on the value of *event*: - if *start* == `True`, data is a dictionary of attributes - if *start* == `False`, data is a string containing the text content of the element - *pos*: Tuple (*line*, *col*) indicating the source of the event. """ with _convert_to_fd_or_read_function(source) as fd: if _debug_python_based_parser: context = _slow_iterparse(fd) else: context = _fast_iterparse(fd) yield iter(context)
Determine the encoding of an XML file by reading its header. Parameters ---------- source : path-like, :term:`file-like (readable)`, or callable Handle that contains the data or function that reads it. If a function or callable object, it must directly read from a stream. Non-callable objects must define a ``read`` method. Returns ------- encoding : str
def get_xml_encoding(source): """ Determine the encoding of an XML file by reading its header. Parameters ---------- source : path-like, :term:`file-like (readable)`, or callable Handle that contains the data or function that reads it. If a function or callable object, it must directly read from a stream. Non-callable objects must define a ``read`` method. Returns ------- encoding : str """ with get_xml_iterator(source) as iterator: start, tag, data, pos = next(iterator) if not start or tag != "xml": raise OSError("Invalid XML file") # The XML spec says that no encoding === utf-8 return data.get("encoding") or "utf-8"
Get the lines from a given XML file. Correctly determines the encoding and always returns unicode. Parameters ---------- source : path-like, :term:`file-like (readable)`, or callable Handle that contains the data or function that reads it. If a function or callable object, it must directly read from a stream. Non-callable objects must define a ``read`` method. Returns ------- lines : list of unicode
def xml_readlines(source): """ Get the lines from a given XML file. Correctly determines the encoding and always returns unicode. Parameters ---------- source : path-like, :term:`file-like (readable)`, or callable Handle that contains the data or function that reads it. If a function or callable object, it must directly read from a stream. Non-callable objects must define a ``read`` method. Returns ------- lines : list of unicode """ encoding = get_xml_encoding(source) with data.get_readable_fileobj(source, encoding=encoding) as input: input.seek(0) xml_lines = input.readlines() return xml_lines
Recursively unescape a given URL. .. note:: '&amp;&amp;' becomes a single '&'. Parameters ---------- url : str or bytes URL to unescape. Returns ------- clean_url : str or bytes Unescaped URL.
def unescape_all(url): """Recursively unescape a given URL. .. note:: '&amp;&amp;' becomes a single '&'. Parameters ---------- url : str or bytes URL to unescape. Returns ------- clean_url : str or bytes Unescaped URL. """ if isinstance(url, bytes): func2use = _unescape_bytes keys2use = _bytes_keys else: func2use = _unescape_str keys2use = _str_keys clean_url = func2use(url) not_done = [clean_url.count(key) > 0 for key in keys2use] if True in not_done: return unescape_all(clean_url) else: return clean_url
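For example, using the doubly escaped ampersand from the note above (assuming this is ``unescape_all`` from ``astropy.utils.xml.unescaper``):

from astropy.utils.xml.unescaper import unescape_all

assert unescape_all("&amp;&amp;") == "&"    # recursive unescaping, per the note
assert unescape_all(b"&amp;&amp;") == b"&"  # bytes input yields bytes output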
Validates an XML file against a schema or DTD. Parameters ---------- filename : str The path to the XML file to validate schema_file : str The path to the XML schema or DTD Returns ------- returncode, stdout, stderr : int, str, str Returns the returncode from xmllint and the stdout and stderr as strings
def validate_schema(filename, schema_file): """ Validates an XML file against a schema or DTD. Parameters ---------- filename : str The path to the XML file to validate schema_file : str The path to the XML schema or DTD Returns ------- returncode, stdout, stderr : int, str, str Returns the returncode from xmllint and the stdout and stderr as strings """ base, ext = os.path.splitext(schema_file) if ext == ".xsd": schema_part = "--schema" elif ext == ".dtd": schema_part = "--dtdvalid" else: raise TypeError("schema_file must be a path to an XML Schema or DTD") p = subprocess.Popen( ["xmllint", "--noout", "--nonet", schema_part, schema_file, filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = p.communicate() if p.returncode == 127: raise OSError("xmllint not found, so can not validate schema") elif p.returncode < 0: from astropy.utils.misc import signal_number_to_name raise OSError( f"xmllint was terminated by signal '{signal_number_to_name(-p.returncode)}'" ) return p.returncode, stdout, stderr
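A usage sketch with hypothetical file paths; the ``xmllint`` binary must be on the PATH, and ``stdout``/``stderr`` come back as bytes from ``subprocess``:

rc, stdout, stderr = validate_schema("votable.xml", "VOTable.xsd")  # hypothetical paths
if rc != 0:
    print("validation failed:", stderr.decode())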
Enhanced histogram function. This is a histogram function that enables the use of more sophisticated algorithms for determining bins. Aside from the ``bins`` argument allowing a string specifying how bins are computed, the parameters are the same as matplotlib.pyplot.hist(). This function was ported from astroML: https://www.astroml.org/ Parameters ---------- x : array-like array of data to be histogrammed bins : int, list, or str, optional If bins is a string, then it must be one of: - 'blocks' : use bayesian blocks for dynamic bin widths - 'knuth' : use Knuth's rule to determine bins - 'scott' : use Scott's rule to determine bins - 'freedman' : use the Freedman-Diaconis rule to determine bins ax : `~matplotlib.axes.Axes` instance, optional Specify the Axes on which to draw the histogram. If not specified, then the current active axes will be used. max_bins : int, optional Maximum number of bins allowed. With more than a few thousand bins the performance of matplotlib will not be great. If the number of bins is large *and* the number of input data points is large then it will take a very long time to compute the histogram. **kwargs : other keyword arguments are described in ``plt.hist()``. Notes ----- Return values are the same as for ``plt.hist()`` See Also -------- astropy.stats.histogram
def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs): """Enhanced histogram function. This is a histogram function that enables the use of more sophisticated algorithms for determining bins. Aside from the ``bins`` argument allowing a string specifying how bins are computed, the parameters are the same as matplotlib.pyplot.hist(). This function was ported from astroML: https://www.astroml.org/ Parameters ---------- x : array-like array of data to be histogrammed bins : int, list, or str, optional If bins is a string, then it must be one of: - 'blocks' : use bayesian blocks for dynamic bin widths - 'knuth' : use Knuth's rule to determine bins - 'scott' : use Scott's rule to determine bins - 'freedman' : use the Freedman-Diaconis rule to determine bins ax : `~matplotlib.axes.Axes` instance, optional Specify the Axes on which to draw the histogram. If not specified, then the current active axes will be used. max_bins : int, optional Maximum number of bins allowed. With more than a few thousand bins the performance of matplotlib will not be great. If the number of bins is large *and* the number of input data points is large then it will take a very long time to compute the histogram. **kwargs : other keyword arguments are described in ``plt.hist()``. Notes ----- Return values are the same as for ``plt.hist()`` See Also -------- astropy.stats.histogram """ # Note that we only calculate the bin edges...matplotlib will calculate # the actual histogram. range = kwargs.get("range", None) weights = kwargs.get("weights", None) bins = calculate_bin_edges(x, bins, range=range, weights=weights) if len(bins) > max_bins: raise ValueError( "Histogram has too many bins: " f"{len(bins)}. Use max_bins to increase the number " "of allowed bins or range to restrict " "the histogram range." ) if ax is None: # optional dependency; only import if strictly needed. import matplotlib.pyplot as plt ax = plt.gca() return ax.hist(x, bins, **kwargs)
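A minimal sketch (``hist`` is part of ``astropy.visualization`` and requires matplotlib; return values match ``plt.hist()``):

import numpy as np
from astropy.visualization import hist

x = np.random.default_rng(0).normal(size=1000)
# 'freedman' chooses the bin width via the Freedman-Diaconis rule.
n, edges, patches = hist(x, bins="freedman", histtype="stepfilled", alpha=0.5)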
Return a naive total intensity from the red, blue, and green intensities. Parameters ---------- image_r : ndarray Intensity of image to be mapped to red; or total intensity if ``image_g`` and ``image_b`` are None. image_g : ndarray, optional Intensity of image to be mapped to green. image_b : ndarray, optional Intensity of image to be mapped to blue. Returns ------- intensity : ndarray Total intensity from the red, blue and green intensities, or ``image_r`` if green and blue images are not provided.
def compute_intensity(image_r, image_g=None, image_b=None): """ Return a naive total intensity from the red, blue, and green intensities. Parameters ---------- image_r : ndarray Intensity of image to be mapped to red; or total intensity if ``image_g`` and ``image_b`` are None. image_g : ndarray, optional Intensity of image to be mapped to green. image_b : ndarray, optional Intensity of image to be mapped to blue. Returns ------- intensity : ndarray Total intensity from the red, blue and green intensities, or ``image_r`` if green and blue images are not provided. """ if image_g is None or image_b is None: if not (image_g is None and image_b is None): raise ValueError( "please specify either a single image or red, green, and blue images." ) return image_r intensity = (image_r + image_g + image_b) / 3.0 # Repack into whatever type was passed to us return np.asarray(intensity, dtype=image_r.dtype)
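For instance (the module path is an assumption; in the astropy source this helper sits alongside ``make_lupton_rgb``):

import numpy as np
from astropy.visualization.lupton_rgb import compute_intensity  # path assumed

r = np.array([1.0, 2.0])
g = np.array([3.0, 4.0])
b = np.array([5.0, 6.0])
print(compute_intensity(r, g, b))  # [3. 4.] -- the per-pixel mean of the three
print(compute_intensity(r))        # [1. 2.] -- a single image is returned as-is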
Return a Red/Green/Blue color image from up to 3 images using an asinh stretch. The input images can be int or float, and in any range or bit-depth. For a more detailed look at the use of this method, see the document :ref:`astropy:astropy-visualization-rgb`. Parameters ---------- image_r : ndarray Image to map to red. image_g : ndarray Image to map to green. image_b : ndarray Image to map to blue. minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). stretch : float The linear stretch of the image. Q : float The asinh softening parameter. filename : str Write the resulting RGB image to a file (file type determined from extension). Returns ------- rgb : ndarray RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
def make_lupton_rgb( image_r, image_g, image_b, minimum=0, stretch=5, Q=8, filename=None ): """ Return a Red/Green/Blue color image from up to 3 images using an asinh stretch. The input images can be int or float, and in any range or bit-depth. For a more detailed look at the use of this method, see the document :ref:`astropy:astropy-visualization-rgb`. Parameters ---------- image_r : ndarray Image to map to red. image_g : ndarray Image to map to green. image_b : ndarray Image to map to blue. minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). stretch : float The linear stretch of the image. Q : float The asinh softening parameter. filename : str Write the resulting RGB image to a file (file type determined from extension). Returns ------- rgb : ndarray RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array. """ asinhMap = AsinhMapping(minimum, stretch, Q) rgb = asinhMap.make_rgb_image(image_r, image_g, image_b) if filename: import matplotlib.image matplotlib.image.imsave(filename, rgb, origin="lower") return rgb
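A small sketch using random data (``make_lupton_rgb`` is part of the public ``astropy.visualization`` API):

import numpy as np
from astropy.visualization import make_lupton_rgb

rng = np.random.default_rng(0)
r, g, b = rng.random((3, 64, 64))  # three fake 64x64 images
rgb = make_lupton_rgb(r, g, b, stretch=0.5, Q=8)
print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint8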
Return a Normalization class that can be used for displaying images with Matplotlib. This function enables only a subset of image stretching functions available in `~astropy.visualization.mpl_normalize.ImageNormalize`. This function is used by the ``astropy.visualization.scripts.fits2bitmap`` script. Parameters ---------- data : ndarray The image array. stretch : {'linear', 'sqrt', 'power', 'log', 'asinh', 'sinh'}, optional The stretch function to apply to the image. The default is 'linear'. power : float, optional The power index for ``stretch='power'``. The default is 1.0. asinh_a : float, optional For ``stretch='asinh'``, the value where the asinh curve transitions from linear to logarithmic behavior, expressed as a fraction of the normalized image. Must be in the range between 0 and 1. The default is 0.1. vmin : float, optional The pixel value of the minimum cut level. Data values less than ``vmin`` will be set to ``vmin`` before stretching the image. The default is the image minimum. ``vmin`` overrides ``min_percent``. vmax : float, optional The pixel value of the maximum cut level. Data values greater than ``vmax`` will be set to ``vmax`` before stretching the image. The default is the image maximum. ``vmax`` overrides ``max_percent``. min_percent : float, optional The percentile value used to determine the pixel value of minimum cut level. The default is 0.0. ``min_percent`` overrides ``percent``. max_percent : float, optional The percentile value used to determine the pixel value of maximum cut level. The default is 100.0. ``max_percent`` overrides ``percent``. percent : float, optional The percentage of the image values used to determine the pixel values of the minimum and maximum cut levels. The lower cut level will be set at the ``(100 - percent) / 2`` percentile, while the upper cut level will be set at the ``(100 + percent) / 2`` percentile. The default is 100.0. ``percent`` is ignored if either ``min_percent`` or ``max_percent`` is input. clip : bool, optional If `True`, data values outside the [0:1] range are clipped to the [0:1] range. log_a : float, optional The log index for ``stretch='log'``. The default is 1000. invalid : None or float, optional Value to assign NaN values generated by the normalization. NaNs in the input ``data`` array are not changed. For matplotlib normalization, the ``invalid`` value should map to the matplotlib colormap "under" value (i.e., any finite value < 0). If `None`, then NaN values are not replaced. This keyword has no effect if ``clip=True``. sinh_a : float, optional The scaling parameter for ``stretch='sinh'``. The default is 0.3. Returns ------- result : `ImageNormalize` instance An `ImageNormalize` instance that can be used for displaying images with Matplotlib.
def simple_norm(
    data,
    stretch="linear",
    power=1.0,
    asinh_a=0.1,
    vmin=None,
    vmax=None,
    min_percent=None,
    max_percent=None,
    percent=None,
    clip=False,
    log_a=1000,
    invalid=-1.0,
    sinh_a=0.3,
):
    """
    Return a Normalization class that can be used for displaying images
    with Matplotlib.

    This function enables only a subset of image stretching functions
    available in `~astropy.visualization.mpl_normalize.ImageNormalize`.

    This function is used by the ``astropy.visualization.scripts.fits2bitmap``
    script.

    Parameters
    ----------
    data : ndarray
        The image array.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh', 'sinh'}, optional
        The stretch function to apply to the image.  The default is
        'linear'.
    power : float, optional
        The power index for ``stretch='power'``.  The default is 1.0.
    asinh_a : float, optional
        For ``stretch='asinh'``, the value where the asinh curve transitions
        from linear to logarithmic behavior, expressed as a fraction of the
        normalized image.  Must be in the range between 0 and 1.  The
        default is 0.1.
    vmin : float, optional
        The pixel value of the minimum cut level.  Data values less than
        ``vmin`` will be set to ``vmin`` before stretching the image.  The
        default is the image minimum.  ``vmin`` overrides ``min_percent``.
    vmax : float, optional
        The pixel value of the maximum cut level.  Data values greater than
        ``vmax`` will be set to ``vmax`` before stretching the image.  The
        default is the image maximum.  ``vmax`` overrides ``max_percent``.
    min_percent : float, optional
        The percentile value used to determine the pixel value of the
        minimum cut level.  The default is 0.0.  ``min_percent`` overrides
        ``percent``.
    max_percent : float, optional
        The percentile value used to determine the pixel value of the
        maximum cut level.  The default is 100.0.  ``max_percent`` overrides
        ``percent``.
    percent : float, optional
        The percentage of the image values used to determine the pixel
        values of the minimum and maximum cut levels.  The lower cut level
        will be set at the ``(100 - percent) / 2`` percentile, while the
        upper cut level will be set at the ``(100 + percent) / 2``
        percentile.  The default is 100.0.  ``percent`` is ignored if
        either ``min_percent`` or ``max_percent`` is input.
    clip : bool, optional
        If `True`, data values outside the [0:1] range are clipped to the
        [0:1] range.
    log_a : float, optional
        The log index for ``stretch='log'``.  The default is 1000.
    invalid : None or float, optional
        Value to assign to NaN values generated by the normalization.  NaNs
        in the input ``data`` array are not changed.  For matplotlib
        normalization, the ``invalid`` value should map to the matplotlib
        colormap "under" value (i.e., any finite value < 0).  If `None`,
        then NaN values are not replaced.  This keyword has no effect if
        ``clip=True``.
    sinh_a : float, optional
        The scaling parameter for ``stretch='sinh'``.  The default is 0.3.

    Returns
    -------
    result : `ImageNormalize` instance
        An `ImageNormalize` instance that can be used for displaying images
        with Matplotlib.
""" if percent is not None: interval = PercentileInterval(percent) elif min_percent is not None or max_percent is not None: interval = AsymmetricPercentileInterval( min_percent or 0.0, max_percent or 100.0 ) elif vmin is not None or vmax is not None: interval = ManualInterval(vmin, vmax) else: interval = MinMaxInterval() if stretch == "linear": stretch = LinearStretch() elif stretch == "sqrt": stretch = SqrtStretch() elif stretch == "power": stretch = PowerStretch(power) elif stretch == "log": stretch = LogStretch(log_a) elif stretch == "asinh": stretch = AsinhStretch(asinh_a) elif stretch == "sinh": stretch = SinhStretch(sinh_a) else: raise ValueError(f"Unknown stretch: {stretch}.") vmin, vmax = interval.get_limits(data) return ImageNormalize( vmin=vmin, vmax=vmax, stretch=stretch, clip=clip, invalid=invalid )
A convenience function to call matplotlib's `matplotlib.pyplot.imshow`
function, using an `ImageNormalize` object as the normalization.

Parameters
----------
data : 2D or 3D array-like
    The data to show. Can be whatever `~matplotlib.pyplot.imshow` and
    `ImageNormalize` both accept. See `~matplotlib.pyplot.imshow`.
ax : None or `~matplotlib.axes.Axes`, optional
    If None, use pyplot's imshow. Otherwise, calls the ``imshow`` method of
    the supplied axes.
**kwargs : dict, optional
    All other keyword arguments are parsed first for the `ImageNormalize`
    initializer; any that remain are passed to `~matplotlib.pyplot.imshow`.

Returns
-------
result : tuple
    A tuple containing the `~matplotlib.image.AxesImage` generated by
    `~matplotlib.pyplot.imshow` as well as the `ImageNormalize` instance.

Notes
-----
The ``norm`` matplotlib keyword is not supported.

Examples
--------
.. plot::
    :include-source:

    import numpy as np
    import matplotlib.pyplot as plt
    from astropy.visualization import (imshow_norm, MinMaxInterval,
                                       SqrtStretch)

    # Generate and display a test image
    image = np.arange(65536).reshape((256, 256))
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    im, norm = imshow_norm(image, ax, origin='lower',
                           interval=MinMaxInterval(),
                           stretch=SqrtStretch())
    fig.colorbar(im)
def imshow_norm(data, ax=None, **kwargs):
    """A convenience function to call matplotlib's `matplotlib.pyplot.imshow`
    function, using an `ImageNormalize` object as the normalization.

    Parameters
    ----------
    data : 2D or 3D array-like
        The data to show. Can be whatever `~matplotlib.pyplot.imshow` and
        `ImageNormalize` both accept. See `~matplotlib.pyplot.imshow`.
    ax : None or `~matplotlib.axes.Axes`, optional
        If None, use pyplot's imshow. Otherwise, calls the ``imshow`` method
        of the supplied axes.
    **kwargs : dict, optional
        All other keyword arguments are parsed first for the `ImageNormalize`
        initializer; any that remain are passed to
        `~matplotlib.pyplot.imshow`.

    Returns
    -------
    result : tuple
        A tuple containing the `~matplotlib.image.AxesImage` generated by
        `~matplotlib.pyplot.imshow` as well as the `ImageNormalize` instance.

    Notes
    -----
    The ``norm`` matplotlib keyword is not supported.

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.visualization import (imshow_norm, MinMaxInterval,
                                           SqrtStretch)

        # Generate and display a test image
        image = np.arange(65536).reshape((256, 256))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        im, norm = imshow_norm(image, ax, origin='lower',
                               interval=MinMaxInterval(),
                               stretch=SqrtStretch())
        fig.colorbar(im)
    """
    if "X" in kwargs:
        raise ValueError("Cannot give both ``X`` and ``data``")

    if "norm" in kwargs:
        raise ValueError(
            "There is no point in using imshow_norm if you give "
            "the ``norm`` keyword - use imshow directly if you "
            "want that."
        )

    imshow_kwargs = dict(kwargs)

    norm_kwargs = {"data": data}
    for pname in _norm_sig.parameters:
        if pname in kwargs:
            norm_kwargs[pname] = imshow_kwargs.pop(pname)

    imshow_kwargs["norm"] = ImageNormalize(**norm_kwargs)

    if ax is None:
        imshow_result = plt.imshow(data, **imshow_kwargs)
    else:
        imshow_result = ax.imshow(data, **imshow_kwargs)

    return imshow_result, imshow_kwargs["norm"]
Calculate the log base n of x.
def _logn(n, x, out=None): """Calculate the log base n of x.""" # We define this because numpy.emath.logn doesn't support the out # keyword. if out is None: return np.log(x) / np.log(n) else: np.log(x, out=out) np.true_divide(out, np.log(n), out=out) return out
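A short sketch of the two call patterns of the helper above (values chosen so the expected results are obvious):

import numpy as np

x = np.array([1.0, 8.0, 64.0])

# Without ``out`` a new array is returned...
assert np.allclose(_logn(2, x), [0.0, 3.0, 6.0])

# ...and with ``out`` the result is written in place.
out = np.empty_like(x)
result = _logn(2, x, out=out)
assert result is out
assert np.allclose(out, [0.0, 3.0, 6.0])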
Prepare the data by optionally clipping and copying, and return the array that should be subsequently used for in-place calculations.
def _prepare(values, clip=True, out=None): """ Prepare the data by optionally clipping and copying, and return the array that should be subsequently used for in-place calculations. """ if clip: return np.clip(values, 0.0, 1.0, out=out) else: if out is None: return np.array(values, copy=True) else: out[:] = np.asarray(values) return out
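The two branches can be seen with a toy array; this sketch just exercises the helper defined above:

import numpy as np

values = np.array([-0.5, 0.25, 1.5])

# clip=True limits the data to the [0, 1] range.
assert np.allclose(_prepare(values), [0.0, 0.25, 1.0])

# clip=False returns a copy, leaving out-of-range values untouched,
# so later in-place calculations cannot modify the caller's array.
prepared = _prepare(values, clip=False)
assert np.allclose(prepared, values)
assert prepared is not values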
Enable support for plotting `astropy.time.Time` instances in
matplotlib.

May be (optionally) used with a ``with`` statement.

  >>> import matplotlib.pyplot as plt
  >>> from astropy.time import Time
  >>> from astropy import visualization
  >>> with visualization.time_support():  # doctest: +IGNORE_OUTPUT
  ...     plt.figure()
  ...     plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
  ...     plt.draw()

Parameters
----------
scale : str, optional
    The time scale to use for the times on the axis. If not specified,
    the scale of the first Time object passed to Matplotlib is used.
format : str, optional
    The time format to use for the times on the axis. If not specified,
    the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
    If possible, simplify labels, e.g. by removing 00:00:00.000 times from
    ISO strings if all labels fall on that time.
def time_support(*, scale=None, format=None, simplify=True):
    """
    Enable support for plotting `astropy.time.Time` instances in
    matplotlib.

    May be (optionally) used with a ``with`` statement.

      >>> import matplotlib.pyplot as plt
      >>> from astropy.time import Time
      >>> from astropy import visualization
      >>> with visualization.time_support():  # doctest: +IGNORE_OUTPUT
      ...     plt.figure()
      ...     plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
      ...     plt.draw()

    Parameters
    ----------
    scale : str, optional
        The time scale to use for the times on the axis. If not specified,
        the scale of the first Time object passed to Matplotlib is used.
    format : str, optional
        The time format to use for the times on the axis. If not specified,
        the format of the first Time object passed to Matplotlib is used.
    simplify : bool, optional
        If possible, simplify labels, e.g. by removing 00:00:00.000 times from
        ISO strings if all labels fall on that time.
    """
    import matplotlib.units as units
    from matplotlib.ticker import MaxNLocator, ScalarFormatter

    from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar

    class AstropyTimeLocator(MaxNLocator):
        # Note: we default to AutoLocator since many time formats
        # can just use this.

        def __init__(self, converter, *args, **kwargs):
            kwargs["nbins"] = 4
            super().__init__(*args, **kwargs)
            self._converter = converter

        def tick_values(self, vmin, vmax):
            # Where we put the ticks depends on the format we are using
            if self._converter.format in YMDHMS_FORMATS:
                # If we are here, we need to check what the range of values
                # is and decide how to find tick locations accordingly

                vrange = vmax - vmin

                if (
                    self._converter.format != "yday" and vrange > 31
                ) or vrange > 366:  # greater than a month
                    # We need to be careful here since not all years and months have
                    # the same length

                    # Start off by converting the values from the range to
                    # datetime objects, so that we can easily extract the year and
                    # month.
tmin = Time( vmin, scale=self._converter.scale, format="mjd" ).datetime tmax = Time( vmax, scale=self._converter.scale, format="mjd" ).datetime # Find the range of years ymin = tmin.year ymax = tmax.year if ymax > ymin + 1: # greater than a year # Find the step we want to use ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3))) ymin = ystep * (ymin // ystep) # Generate the years for these steps times = [] for year in range(ymin, ymax + 1, ystep): times.append(datetime(year=year, month=1, day=1)) else: # greater than a month but less than a year mmin = tmin.month mmax = tmax.month + 12 * (ymax - ymin) mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3))) mmin = mstep * max(1, mmin // mstep) # Generate the months for these steps times = [] for month in range(mmin, mmax + 1, mstep): times.append( datetime( year=ymin + (month - 1) // 12, month=(month - 1) % 12 + 1, day=1, ) ) # Convert back to MJD values = Time(times, scale=self._converter.scale).mjd elif vrange > 1: # greater than a day self.set_params(steps=[1, 2, 5, 10]) values = super().tick_values(vmin, vmax) else: # Determine ideal step dv = (vmax - vmin) / 3 * 24 << u.hourangle # And round to nearest sensible value dv = select_step_hour(dv).to_value(u.hourangle) / 24 # Determine tick locations imin = np.ceil(vmin / dv) imax = np.floor(vmax / dv) values = np.arange(imin, imax + 1, dtype=np.int64) * dv else: values = super().tick_values(vmin, vmax) # Get rid of values outside of the input interval values = values[(values >= vmin) & (values <= vmax)] return values def __call__(self): vmin, vmax = self.axis.get_view_interval() return self.tick_values(vmin, vmax) class AstropyTimeFormatter(ScalarFormatter): def __init__(self, converter, *args, **kwargs): super().__init__(*args, **kwargs) self._converter = converter self.set_useOffset(False) self.set_scientific(False) def format_ticks(self, values): if len(values) == 0: return [] if self._converter.format in YMDHMS_FORMATS: times = Time(values, format="mjd", scale=self._converter.scale) formatted = getattr(times, self._converter.format) if self._converter.simplify: if self._converter.format in ("fits", "iso", "isot"): if all(x.endswith("00:00:00.000") for x in formatted): split = " " if self._converter.format == "iso" else "T" formatted = [x.split(split)[0] for x in formatted] elif self._converter.format == "yday": if all(x.endswith(":001:00:00:00.000") for x in formatted): formatted = [x.split(":", 1)[0] for x in formatted] return formatted elif self._converter.format == "byear_str": return Time( values, format="byear", scale=self._converter.scale ).byear_str elif self._converter.format == "jyear_str": return Time( values, format="jyear", scale=self._converter.scale ).jyear_str else: return super().format_ticks(values) class MplTimeConverter(units.ConversionInterface): def __init__(self, scale=None, format=None, simplify=None): super().__init__() self.format = format self.scale = scale self.simplify = simplify # Keep track of original converter in case the context manager is # used in a nested way. 
self._original_converter = units.registry.get(Time) units.registry[Time] = self @property def format(self): return self._format @format.setter def format(self, value): if value in UNSUPPORTED_FORMATS: raise ValueError(f"time_support does not support format={value}") self._format = value def __enter__(self): return self def __exit__(self, type, value, tb): if self._original_converter is None: del units.registry[Time] else: units.registry[Time] = self._original_converter def default_units(self, x, axis): if isinstance(x, tuple): x = x[0] if self.format is None: self.format = x.format if self.scale is None: self.scale = x.scale return "astropy_time" def convert(self, value, unit, axis): """ Convert a Time value to a scalar or array. """ scaled = getattr(value, self.scale) if self.format in YMDHMS_FORMATS: return scaled.mjd elif self.format == "byear_str": return scaled.byear elif self.format == "jyear_str": return scaled.jyear else: return getattr(scaled, self.format) def axisinfo(self, unit, axis): """ Return major and minor tick locators and formatters. """ majloc = AstropyTimeLocator(self) majfmt = AstropyTimeFormatter(self) return units.AxisInfo( majfmt=majfmt, majloc=majloc, label=f"Time ({self.scale})" ) return MplTimeConverter(scale=scale, format=format, simplify=simplify)
Enable support for plotting `astropy.units.Quantity` instances in matplotlib. May be (optionally) used with a ``with`` statement. >>> import matplotlib.pyplot as plt >>> from astropy import units as u >>> from astropy import visualization >>> with visualization.quantity_support(): ... plt.figure() ... plt.plot([1, 2, 3] * u.m) [...] ... plt.plot([101, 125, 150] * u.cm) [...] ... plt.draw() Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to ``latex_inline``.
def quantity_support(format="latex_inline"): """ Enable support for plotting `astropy.units.Quantity` instances in matplotlib. May be (optionally) used with a ``with`` statement. >>> import matplotlib.pyplot as plt >>> from astropy import units as u >>> from astropy import visualization >>> with visualization.quantity_support(): ... plt.figure() ... plt.plot([1, 2, 3] * u.m) [...] ... plt.plot([101, 125, 150] * u.cm) [...] ... plt.draw() Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to ``latex_inline``. """ from matplotlib import ticker, units from astropy import units as u def rad_fn(x, pos=None): n = int((x / np.pi) * 2.0 + 0.25) if n == 0: return "0" elif n == 1: return "π/2" elif n == 2: return "π" elif n % 2 == 0: return f"{n // 2}π" else: return f"{n}π/2" class MplQuantityConverter(units.ConversionInterface): def __init__(self): # Keep track of original converter in case the context manager is # used in a nested way. self._original_converter = {u.Quantity: units.registry.get(u.Quantity)} units.registry[u.Quantity] = self @staticmethod def axisinfo(unit, axis): if unit == u.radian: return units.AxisInfo( majloc=ticker.MultipleLocator(base=np.pi / 2), majfmt=ticker.FuncFormatter(rad_fn), label=unit.to_string(), ) elif unit == u.degree: return units.AxisInfo( majloc=ticker.AutoLocator(), majfmt=ticker.FormatStrFormatter("%g°"), label=unit.to_string(), ) elif unit is not None: return units.AxisInfo(label=unit.to_string(format)) return None @staticmethod def convert(val, unit, axis): if isinstance(val, u.Quantity): return val.to_value(unit) elif isinstance(val, list) and val and isinstance(val[0], u.Quantity): return [v.to_value(unit) for v in val] else: return val @staticmethod def default_units(x, axis): if hasattr(x, "unit"): return x.unit return None def __enter__(self): return self def __exit__(self, type, value, tb): if self._original_converter[u.Quantity] is None: del units.registry[u.Quantity] else: units.registry[u.Quantity] = self._original_converter[u.Quantity] return MplQuantityConverter()
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.

Parameters
----------
filename : str
    The filename of the FITS file.
ext : int
    FITS extension name or number of the image to convert.  The
    default is 0.
out_fn : str
    The filename of the output bitmap image.  The type of bitmap is
    determined by the filename extension (e.g. '.jpg', '.png').  The
    default is a PNG file with the same name as the FITS file.
stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
    The stretching function to apply to the image.  The default is
    'linear'.
power : float, optional
    The power index for ``stretch='power'``.  The default is 1.0.
asinh_a : float, optional
    For ``stretch='asinh'``, the value where the asinh curve transitions
    from linear to logarithmic behavior, expressed as a fraction of the
    normalized image.  Must be in the range between 0 and 1.  The
    default is 0.1.
vmin : float, optional
    The pixel value of the minimum cut level.  Data values less than
    ``vmin`` will be set to ``vmin`` before stretching the image.  The
    default is the image minimum.  ``vmin`` overrides ``min_percent``.
vmax : float, optional
    The pixel value of the maximum cut level.  Data values greater than
    ``vmax`` will be set to ``vmax`` before stretching the image.  The
    default is the image maximum.  ``vmax`` overrides ``max_percent``.
min_percent : float, optional
    The percentile value used to determine the pixel value of the
    minimum cut level.  The default is 0.0.  ``min_percent`` overrides
    ``percent``.
max_percent : float, optional
    The percentile value used to determine the pixel value of the
    maximum cut level.  The default is 100.0.  ``max_percent`` overrides
    ``percent``.
percent : float, optional
    The percentage of the image values used to determine the pixel
    values of the minimum and maximum cut levels.  The lower cut level
    will be set at the ``(100 - percent) / 2`` percentile, while the
    upper cut level will be set at the ``(100 + percent) / 2``
    percentile.  The default is 100.0.  ``percent`` is ignored if
    either ``min_percent`` or ``max_percent`` is input.
cmap : str
    The matplotlib color map name.  The default is 'Greys_r'.
def fits2bitmap(
    filename,
    ext=0,
    out_fn=None,
    stretch="linear",
    power=1.0,
    asinh_a=0.1,
    vmin=None,
    vmax=None,
    min_percent=None,
    max_percent=None,
    percent=None,
    cmap="Greys_r",
):
    """
    Create a bitmap file from a FITS image, applying a stretching
    transform between minimum and maximum cut levels and a matplotlib
    colormap.

    Parameters
    ----------
    filename : str
        The filename of the FITS file.
    ext : int
        FITS extension name or number of the image to convert.  The
        default is 0.
    out_fn : str
        The filename of the output bitmap image.  The type of bitmap is
        determined by the filename extension (e.g. '.jpg', '.png').  The
        default is a PNG file with the same name as the FITS file.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
        The stretching function to apply to the image.  The default is
        'linear'.
    power : float, optional
        The power index for ``stretch='power'``.  The default is 1.0.
    asinh_a : float, optional
        For ``stretch='asinh'``, the value where the asinh curve transitions
        from linear to logarithmic behavior, expressed as a fraction of the
        normalized image.  Must be in the range between 0 and 1.  The
        default is 0.1.
    vmin : float, optional
        The pixel value of the minimum cut level.  Data values less than
        ``vmin`` will be set to ``vmin`` before stretching the image.  The
        default is the image minimum.  ``vmin`` overrides ``min_percent``.
    vmax : float, optional
        The pixel value of the maximum cut level.  Data values greater than
        ``vmax`` will be set to ``vmax`` before stretching the image.  The
        default is the image maximum.  ``vmax`` overrides ``max_percent``.
    min_percent : float, optional
        The percentile value used to determine the pixel value of the
        minimum cut level.  The default is 0.0.  ``min_percent`` overrides
        ``percent``.
    max_percent : float, optional
        The percentile value used to determine the pixel value of the
        maximum cut level.  The default is 100.0.  ``max_percent`` overrides
        ``percent``.
    percent : float, optional
        The percentage of the image values used to determine the pixel
        values of the minimum and maximum cut levels.  The lower cut level
        will be set at the ``(100 - percent) / 2`` percentile, while the
        upper cut level will be set at the ``(100 + percent) / 2``
        percentile.  The default is 100.0.  ``percent`` is ignored if
        either ``min_percent`` or ``max_percent`` is input.
    cmap : str
        The matplotlib color map name.  The default is 'Greys_r'.
    """
    import matplotlib as mpl
    import matplotlib.image as mimg

    from astropy.utils.introspection import minversion

    # __main__ gives ext as a string
    try:
        ext = int(ext)
    except ValueError:
        pass

    try:
        image = getdata(filename, ext)
    except Exception as e:
        log.critical(e)
        return 1

    if image.ndim != 2:
        log.critical(f"data in FITS extension {ext} is not a 2D array")
        return 1  # bail out, as the other error paths do

    if out_fn is None:
        out_fn = os.path.splitext(filename)[0]
        if out_fn.endswith(".fits"):
            out_fn = os.path.splitext(out_fn)[0]
        out_fn += ".png"

    # explicitly define the output format
    out_format = os.path.splitext(out_fn)[1][1:]

    try:
        if minversion(mpl, "3.5"):
            mpl.colormaps[cmap]
        else:
            from matplotlib import cm

            cm.get_cmap(cmap)
    except (ValueError, KeyError):
        log.critical(f"{cmap} is not a valid matplotlib colormap name.")
        return 1

    norm = simple_norm(
        image,
        stretch=stretch,
        power=power,
        asinh_a=asinh_a,
        vmin=vmin,
        vmax=vmax,
        min_percent=min_percent,
        max_percent=max_percent,
        percent=percent,
    )

    mimg.imsave(out_fn, norm(image), cmap=cmap, origin="lower", format=out_format)
    log.info(f"Saved file to {out_fn}.")
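A usage sketch for the script entry point above, assuming a writable working directory; the filenames and stretch parameters are illustrative:

import numpy as np

from astropy.io import fits

# Write a small synthetic FITS image to convert.
image = np.random.default_rng(0).normal(loc=100.0, scale=5.0, size=(128, 128))
fits.writeto("synthetic.fits", image, overwrite=True)

# Convert it to a PNG using an asinh stretch and percentile cut levels.
fits2bitmap(
    "synthetic.fits",
    out_fn="synthetic.png",
    stretch="asinh",
    percent=99.0,
    cmap="viridis",
)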
Regression test to ensure ZScaleInterval returns the minimum and
maximum of the data if the number of data points is less than
``min_npixels``.
def test_zscale_npoints():
    """
    Regression test to ensure ZScaleInterval returns the minimum and
    maximum of the data if the number of data points is less than
    ``min_npixels``.
    """
    data = np.arange(4).reshape((2, 2))
    interval = ZScaleInterval(min_npixels=5)
    vmin, vmax = interval.get_limits(data)
    assert vmin == 0
    assert vmax == 3
Test intervals with constant data (avoiding divide-by-zero).
def test_constant_data(): """Test intervals with constant data (avoiding divide-by-zero).""" shape = (10, 10) data = np.ones(shape) interval = MinMaxInterval() limits = interval.get_limits(data) values = interval(data) np.testing.assert_allclose(limits, (1.0, 1.0)) np.testing.assert_allclose(values, np.zeros(shape))
Display an RGB image using matplotlib (useful for debugging).
def display_rgb(rgb, title=None):
    """Display an RGB image using matplotlib (useful for debugging)."""
    import matplotlib.pyplot as plt

    plt.imshow(rgb, interpolation="nearest", origin="lower")
    if title:
        plt.title(title)
    plt.show()
    return plt
Return a copy of the image with all points above ``satValue`` set to NaN.

This simulates saturation on an image, so we can test
'replace_saturated_pixels'.
def saturate(image, satValue):
    """
    Return a copy of the image with all points above ``satValue`` set to NaN.

    This simulates saturation on an image, so we can test
    'replace_saturated_pixels'.
    """
    result = image.copy()
    saturated = image > satValue
    result[saturated] = np.nan
    return result
Check that subclasses are recognized. Also see https://github.com/matplotlib/matplotlib/pull/13536
def test_quantity_subclass(): """Check that subclasses are recognized. Also see https://github.com/matplotlib/matplotlib/pull/13536 """ plt.figure() with quantity_support(): plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg) plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g) plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g) assert plt.gca().xaxis.get_units() == u.deg assert plt.gca().yaxis.get_units() == u.kg
Find the range of coordinates to use for ticks/grids.

Parameters
----------
transform : transform object
    The transform from pixel to world coordinates. Its ``transform``
    method should take an ``(n, 2)`` array of pixel coordinates and
    return an ``(n, 2)`` array of world coordinates.
extent : iterable
    The range of the image viewport in pixel coordinates, given as [xmin,
    xmax, ymin, ymax].
coord_types : list of str
    Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
    ``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
    The units for each coordinate.
coord_wraps : list of `astropy.units.Quantity`
    The wrap angles for longitudes.
def find_coordinate_range(transform, extent, coord_types, coord_units, coord_wraps):
    """
    Find the range of coordinates to use for ticks/grids.

    Parameters
    ----------
    transform : transform object
        The transform from pixel to world coordinates. Its ``transform``
        method should take an ``(n, 2)`` array of pixel coordinates and
        return an ``(n, 2)`` array of world coordinates.
    extent : iterable
        The range of the image viewport in pixel coordinates, given as [xmin,
        xmax, ymin, ymax].
    coord_types : list of str
        Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
        ``'scalar'`` value.
    coord_units : list of `astropy.units.Unit`
        The units for each coordinate.
    coord_wraps : list of `astropy.units.Quantity`
        The wrap angles for longitudes.
    """
    # Sample coordinates on an NX x NY grid.
    from . import conf

    if len(extent) == 4:
        nx = ny = conf.coordinate_range_samples
        x = np.linspace(extent[0], extent[1], nx + 1)
        y = np.linspace(extent[2], extent[3], ny + 1)
        xp, yp = np.meshgrid(x, y)
        with np.errstate(invalid="ignore"):
            world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
    else:
        nx = conf.coordinate_range_samples
        xp = np.linspace(extent[0], extent[1], nx + 1)[None]
        with np.errstate(invalid="ignore"):
            world = transform.transform(xp.T)

    ranges = []

    for coord_index, coord_type in enumerate(coord_types):
        xw = world[:, coord_index].reshape(xp.shape)

        if coord_type in LONLAT:
            unit = coord_units[coord_index]
            xw = xw * unit.to(u.deg)

            # Iron out coordinates along first row
            wjump = xw[0, 1:] - xw[0, :-1]
            with np.errstate(invalid="ignore"):
                reset = np.abs(wjump) > 180.0
            if np.any(reset):
                wjump = wjump + np.sign(wjump) * 180.0
                wjump = 360.0 * np.trunc(wjump / 360.0)
                xw[0, 1:][reset] -= wjump[reset]

            # Now iron out coordinates along all columns, starting with first row.
            wjump = xw[1:] - xw[:1]
            with np.errstate(invalid="ignore"):
                reset = np.abs(wjump) > 180.0
            if np.any(reset):
                wjump = wjump + np.sign(wjump) * 180.0
                wjump = 360.0 * np.trunc(wjump / 360.0)
                xw[1:][reset] -= wjump[reset]

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            xw_min = np.nanmin(xw)
            xw_max = np.nanmax(xw)

        # Check if range is smaller when normalizing to the range 0 to 360
        if coord_type in LONLAT:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", RuntimeWarning)
                xw_min_check = np.nanmin(xw % 360.0)
                xw_max_check = np.nanmax(xw % 360.0)

            if xw_max_check - xw_min_check <= xw_max - xw_min < 360.0:
                xw_min = xw_min_check
                xw_max = xw_max_check

        # Check if range is smaller when normalizing to the range -180 to 180
        if coord_type in LONLAT:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", RuntimeWarning)
                xw_min_check = np.nanmin(wrap_180(xw))
                xw_max_check = np.nanmax(wrap_180(xw))

            if (
                xw_max_check - xw_min_check < 360.0
                and xw_max - xw_min >= xw_max_check - xw_min_check
            ):
                xw_min = xw_min_check
                xw_max = xw_max_check

        x_range = xw_max - xw_min
        if coord_type == "longitude":
            if x_range > 300.0:
                coord_wrap = coord_wraps[coord_index]
                if not isinstance(coord_wrap, u.Quantity):
                    warnings.warn(
                        "Passing 'coord_wraps' as numbers is deprecated. "
                        "Use a Quantity with units convertible to angular "
                        "degrees instead.",
                        AstropyDeprecationWarning,
                    )
                    coord_wrap = coord_wrap * u.deg

                xw_min = coord_wrap.to_value(u.deg) - 360
                xw_max = coord_wrap.to_value(u.deg) - np.spacing(360.0)
            elif xw_min < 0.0:
                xw_min = max(-180.0, xw_min - 0.1 * x_range)
                xw_max = min(+180.0, xw_max + 0.1 * x_range)
            else:
                xw_min = max(0.0, xw_min - 0.1 * x_range)
                xw_max = min(360.0, xw_max + 0.1 * x_range)
        elif coord_type == "latitude":
            xw_min = max(-90.0, xw_min - 0.1 * x_range)
            xw_max = min(+90.0, xw_max + 0.1 * x_range)

        if coord_type in LONLAT:
            xw_min *= u.deg.to(unit)
            xw_max *= u.deg.to(unit)

        ranges.append((xw_min, xw_max))

    return ranges
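The heart of the longitude handling above is choosing whichever representation (native, modulo 360, or wrapped to [-180, 180)) yields the smallest span. A standalone numpy illustration of that comparison, with ``wrap_180`` mimicked inline:

import numpy as np

# Longitudes straddling the 0/360 discontinuity.
xw = np.array([350.0, 355.0, 2.0, 8.0])

span_native = np.ptp(xw)                   # 353 deg: misleadingly large
span_mod360 = np.ptp(xw % 360.0)           # still 353 deg
wrapped = ((xw + 180.0) % 360.0) - 180.0   # wrap into [-180, 180)
span_wrapped = np.ptp(wrapped)             # 18 deg: the sensible range

print(span_native, span_mod360, span_wrapped)  # 353.0 353.0 18.0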
Draw a curve, taking into account discontinuities. Parameters ---------- lon_lat : ndarray The longitude and latitude values along the curve, given as a (n,2) array. pixel : ndarray The pixel coordinates corresponding to ``lon_lat`` lon_lat_check : ndarray The world coordinates derived from converting from ``pixel``, which is used to ensure round-tripping.
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
    """
    Draw a curve, taking into account discontinuities.

    Parameters
    ----------
    lon_lat : ndarray
        The longitude and latitude values along the curve, given as a (n,2)
        array.
    pixel : ndarray
        The pixel coordinates corresponding to ``lon_lat``
    lon_lat_check : ndarray
        The world coordinates derived from converting from ``pixel``, which is
        used to ensure round-tripping.
    """
    # In some spherical projections, some parts of the curve are 'behind' or
    # 'in front of' the plane of the image, so we find those by reversing the
    # transformation and finding points where the result is not consistent.
    sep = angular_separation(
        np.radians(lon_lat[:, 0]),
        np.radians(lon_lat[:, 1]),
        np.radians(lon_lat_check[:, 0]),
        np.radians(lon_lat_check[:, 1]),
    )

    # Define the relevant scale size using the separation between the first two points
    scale_size = angular_separation(
        *np.radians(lon_lat[0, :]), *np.radians(lon_lat[1, :])
    )

    with np.errstate(invalid="ignore"):
        sep[sep > np.pi] -= 2.0 * np.pi
        mask = np.abs(sep) > ROUND_TRIP_RTOL * scale_size

    # Mask values with invalid pixel positions
    mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])

    # We can now start to set up the codes for the Path.
    codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
    codes[:] = Path.LINETO
    codes[0] = Path.MOVETO
    codes[mask] = Path.MOVETO

    # Also need to move to point *after* a hidden value
    codes[1:][mask[:-1]] = Path.MOVETO

    # We now go through and search for discontinuities in the curve that would
    # be due to the curve going outside the field of view, invalid WCS values,
    # or due to discontinuities in the projection.

    # We start off by pre-computing the step in pixel coordinates from one
    # point to the next. The idea is to look for large jumps that might indicate
    # discontinuities.
    step = np.sqrt(
        (pixel[1:, 0] - pixel[:-1, 0]) ** 2 + (pixel[1:, 1] - pixel[:-1, 1]) ** 2
    )

    # We search for discontinuities by looking for places where the step
    # is larger by more than a given factor compared to the previous step
    discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]

    # Skip over discontinuities
    codes[2:][discontinuous] = Path.MOVETO

    # The above missed the first step, so check that too
    if step[0] > DISCONT_FACTOR * step[1]:
        codes[1] = Path.MOVETO

    # Create the path
    path = Path(pixel, codes=codes)

    return path
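The masking pattern used above (a MOVETO at each hidden point and at the point right after it) can be seen in isolation with a toy path; this sketch only demonstrates the code construction, not the WCS round-trip check:

import numpy as np
from matplotlib.path import Path

pixel = np.array(
    [[0.0, 0.0], [1.0, 1.0], [np.nan, np.nan], [3.0, 3.0], [4.0, 4.0]]
)
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])

codes = np.full(len(pixel), Path.LINETO, dtype=np.uint8)
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO           # lift the pen at the hidden point...
codes[1:][mask[:-1]] = Path.MOVETO  # ...and at the point right after it

path = Path(pixel, codes=codes)
print(codes)  # [1 2 1 1 2] with MOVETO == 1 and LINETO == 2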
Draw a grid line.

Parameters
----------
world : ndarray
    The longitude and latitude values along the curve, given as a (n,2)
    array.
pixel : ndarray
    The pixel coordinates corresponding to ``world``.
def get_gridline_path(world, pixel):
    """
    Draw a grid line.

    Parameters
    ----------
    world : ndarray
        The longitude and latitude values along the curve, given as a (n,2)
        array.
    pixel : ndarray
        The pixel coordinates corresponding to ``world``.
    """
    # Mask values with invalid pixel positions
    mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])

    # We can now start to set up the codes for the Path.
    codes = np.zeros(world.shape[0], dtype=np.uint8)
    codes[:] = Path.LINETO
    codes[0] = Path.MOVETO
    codes[mask] = Path.MOVETO

    # Also need to move to point *after* a hidden value
    codes[1:][mask[:-1]] = Path.MOVETO

    # Create the path
    path = Path(pixel, codes=codes)

    return path
Display the beam shape and size.

Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
    WCSAxes instance in which the beam shape and size is displayed. The
    WCS must be celestial.
header : :class:`~astropy.io.fits.Header`, optional
    Header containing the beam parameters. If specified, the ``BMAJ``,
    ``BMIN``, and ``BPA`` keywords will be searched in the FITS header
    to set the major and minor axes and the position angle on the sky.
major : float or :class:`~astropy.units.Quantity`, optional
    Major axis of the beam in degrees or an angular quantity.
minor : float or :class:`~astropy.units.Quantity`, optional
    Minor axis of the beam in degrees or an angular quantity.
angle : float or :class:`~astropy.units.Quantity`, optional
    Position angle of the beam on the sky in degrees or an angular
    quantity in the anticlockwise direction.
corner : str, optional
    The beam location. Acceptable values are ``'left'``, ``'right'``,
    ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
    ``'bottom left'`` (default), and ``'bottom right'``.
frame : bool, optional
    Whether to display a frame behind the beam (default is ``False``).
borderpad : float, optional
    Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
    Padding around the beam, in fraction of the font size. Default is 0.5.
kwargs
    Additional arguments are passed to :class:`matplotlib.patches.Ellipse`.

Notes
-----
This function may be inaccurate when:

- The pixel scales at the reference pixel are different from the pixel
  scales within the image extent (e.g., when the reference pixel is well
  outside of the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each
  other (e.g., rectangular pixels)
def add_beam(
    ax,
    header=None,
    major=None,
    minor=None,
    angle=None,
    corner="bottom left",
    frame=False,
    borderpad=0.4,
    pad=0.5,
    **kwargs,
):
    """
    Display the beam shape and size.

    Parameters
    ----------
    ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
        WCSAxes instance in which the beam shape and size is displayed. The
        WCS must be celestial.
    header : :class:`~astropy.io.fits.Header`, optional
        Header containing the beam parameters. If specified, the ``BMAJ``,
        ``BMIN``, and ``BPA`` keywords will be searched in the FITS header
        to set the major and minor axes and the position angle on the sky.
    major : float or :class:`~astropy.units.Quantity`, optional
        Major axis of the beam in degrees or an angular quantity.
    minor : float or :class:`~astropy.units.Quantity`, optional
        Minor axis of the beam in degrees or an angular quantity.
    angle : float or :class:`~astropy.units.Quantity`, optional
        Position angle of the beam on the sky in degrees or an angular
        quantity in the anticlockwise direction.
    corner : str, optional
        The beam location. Acceptable values are ``'left'``, ``'right'``,
        ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
        ``'bottom left'`` (default), and ``'bottom right'``.
    frame : bool, optional
        Whether to display a frame behind the beam (default is ``False``).
    borderpad : float, optional
        Border padding, in fraction of the font size. Default is 0.4.
    pad : float, optional
        Padding around the beam, in fraction of the font size. Default is 0.5.
    kwargs
        Additional arguments are passed to :class:`matplotlib.patches.Ellipse`.

    Notes
    -----
    This function may be inaccurate when:

    - The pixel scales at the reference pixel are different from the pixel
      scales within the image extent (e.g., when the reference pixel is well
      outside of the image extent and the projection is non-linear)
    - The pixel scales in the two directions are very different from each
      other (e.g., rectangular pixels)
    """
    if header and major:
        raise ValueError(
            "Either header or major/minor/angle must be specified, not both."
        )

    if header:
        major = header["BMAJ"]
        minor = header["BMIN"]
        angle = header["BPA"]

    if isinstance(major, u.Quantity):
        major = major.to(u.degree).value

    if isinstance(minor, u.Quantity):
        minor = minor.to(u.degree).value

    if isinstance(angle, u.Quantity):
        angle = angle.to(u.degree).value

    if ax.wcs.is_celestial:
        pix_scale = proj_plane_pixel_scales(ax.wcs)
        sx = pix_scale[0]
        sy = pix_scale[1]
        degrees_per_pixel = np.sqrt(sx * sy)
    else:
        raise ValueError("Cannot show beam when WCS is not celestial")

    minor /= degrees_per_pixel
    major /= degrees_per_pixel

    corner = CORNERS[corner]

    beam = AnchoredEllipse(
        ax.transData,
        width=minor,
        height=major,
        angle=angle,
        loc=corner,
        pad=pad,
        borderpad=borderpad,
        frameon=frame,
    )
    beam.ellipse.set(**kwargs)

    ax.add_artist(beam)
Add a scale bar.

Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
    WCSAxes instance in which the scale bar is displayed. The WCS must be
    celestial.
length : float or :class:`~astropy.units.Quantity`
    The length of the scalebar in degrees or an angular quantity.
label : str, optional
    Label to place below the scale bar.
corner : str, optional
    Where to place the scale bar. Acceptable values are ``'left'``,
    ``'right'``, ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
    ``'bottom left'`` and ``'bottom right'`` (default).
frame : bool, optional
    Whether to display a frame behind the scale bar (default is ``False``).
borderpad : float, optional
    Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
    Padding around the scale bar, in fraction of the font size. Default is
    0.5.
kwargs
    Additional arguments are passed to
    :class:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.

Notes
-----
This function may be inaccurate when:

- The pixel scales at the reference pixel are different from the pixel
  scales within the image extent (e.g., when the reference pixel is well
  outside of the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each
  other (e.g., rectangular pixels)
def add_scalebar(
    ax,
    length,
    label=None,
    corner="bottom right",
    frame=False,
    borderpad=0.4,
    pad=0.5,
    **kwargs,
):
    """Add a scale bar.

    Parameters
    ----------
    ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
        WCSAxes instance in which the scale bar is displayed. The WCS must be
        celestial.
    length : float or :class:`~astropy.units.Quantity`
        The length of the scalebar in degrees or an angular quantity.
    label : str, optional
        Label to place below the scale bar.
    corner : str, optional
        Where to place the scale bar. Acceptable values are ``'left'``,
        ``'right'``, ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
        ``'bottom left'`` and ``'bottom right'`` (default).
    frame : bool, optional
        Whether to display a frame behind the scale bar (default is
        ``False``).
    borderpad : float, optional
        Border padding, in fraction of the font size. Default is 0.4.
    pad : float, optional
        Padding around the scale bar, in fraction of the font size. Default is
        0.5.
    kwargs
        Additional arguments are passed to
        :class:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.

    Notes
    -----
    This function may be inaccurate when:

    - The pixel scales at the reference pixel are different from the pixel
      scales within the image extent (e.g., when the reference pixel is well
      outside of the image extent and the projection is non-linear)
    - The pixel scales in the two directions are very different from each
      other (e.g., rectangular pixels)
    """
    if isinstance(length, u.Quantity):
        length = length.to(u.degree).value

    if ax.wcs.is_celestial:
        pix_scale = proj_plane_pixel_scales(ax.wcs)
        sx = pix_scale[0]
        sy = pix_scale[1]
        degrees_per_pixel = np.sqrt(sx * sy)
    else:
        raise ValueError("Cannot show scalebar when WCS is not celestial")

    length = length / degrees_per_pixel

    corner = CORNERS[corner]

    scalebar = AnchoredSizeBar(
        ax.transData,
        length,
        label,
        corner,
        pad=pad,
        borderpad=borderpad,
        sep=5,
        frameon=frame,
        **kwargs,
    )

    ax.add_artist(scalebar)
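A hedged usage sketch for the two annotation helpers above, assuming an astropy version that exports them from ``astropy.visualization.wcsaxes``; the WCS parameters, beam size, and scale bar length are all invented for illustration:

import matplotlib.pyplot as plt
import numpy as np

from astropy import units as u
from astropy.visualization.wcsaxes import add_beam, add_scalebar
from astropy.wcs import WCS

# A simple celestial WCS covering a small patch of sky.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crpix = [64.0, 64.0]
wcs.wcs.cdelt = [-0.002, 0.002]
wcs.wcs.crval = [30.0, -20.0]

ax = plt.subplot(projection=wcs)
ax.imshow(np.random.default_rng(1).normal(size=(128, 128)), origin="lower")

add_beam(ax, major=30 * u.arcsec, minor=20 * u.arcsec, angle=45 * u.deg)
add_scalebar(ax, 1 * u.arcmin, label="1'", corner="top right")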
Given a polygon with vertices defined by (lon, lat), rotate the polygon such that the North pole of the spherical coordinates is now at (lon0, lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the polygon should initially be drawn around the North pole.
def _rotate_polygon(lon, lat, lon0, lat0): """ Given a polygon with vertices defined by (lon, lat), rotate the polygon such that the North pole of the spherical coordinates is now at (lon0, lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the polygon should initially be drawn around the North pole. """ # Create a representation object polygon = UnitSphericalRepresentation(lon=lon, lat=lat) # Determine rotation matrix to make it so that the circle is centered # on the correct longitude/latitude. transform_matrix = rotation_matrix(-lon0, axis="z") @ rotation_matrix( -(0.5 * np.pi * u.radian - lat0), axis="y" ) # Apply 3D rotation polygon = polygon.to_cartesian() polygon = polygon.transform(transform_matrix) polygon = UnitSphericalRepresentation.from_cartesian(polygon) return polygon.lon, polygon.lat
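A sketch of the intended workflow for the helper above: draw a small circle of vertices around the North pole, then rotate it onto the desired center (the center coordinates are arbitrary):

import numpy as np

from astropy import units as u

# Eight vertices of a small circle around the North pole (lat = 85 deg).
lon = np.linspace(0.0, 360.0, 9)[:-1] * u.deg
lat = np.full(8, 85.0) * u.deg

# Rotate the polygon so it is centered on (lon0, lat0) = (40, 10) deg.
new_lon, new_lat = _rotate_polygon(lon, lat, 40.0 * u.deg, 10.0 * u.deg)
print(new_lat.to(u.deg))  # latitudes now cluster around 10 deg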
Transform a contour set in-place using a specified
:class:`matplotlib.transforms.Transform`.

Using transforms with the native Matplotlib contour/contourf can be slow
if the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
def transform_contour_set_inplace(cset, transform):
    """
    Transform a contour set in-place using a specified
    :class:`matplotlib.transforms.Transform`.

    Using transforms with the native Matplotlib contour/contourf can be slow
    if the transforms have a non-negligible overhead (which is the case for
    WCS/SkyCoord transforms) since the transform is called for each individual
    contour line. It is more efficient to stack all the contour lines together
    temporarily and transform them in one go.
    """
    # The contours are represented as paths grouped into levels. Each can have
    # one or more paths. The approach we take here is to stack the vertices of
    # all paths and transform them in one go. The pos_level list helps us keep
    # track of where the set of segments for each overall contour level ends.
    # The pos_segments list helps us keep track of where each segment ends for
    # each contour level.
    all_paths = []
    pos_level = []
    pos_segments = []

    if MATPLOTLIB_LT_3_8:
        for collection in cset.collections:
            paths = collection.get_paths()
            if len(paths) == 0:
                continue
            all_paths.append(paths)
            # The last item in pos isn't needed for np.split and in fact causes
            # issues if we keep it because it will cause an extra empty array to be
            # returned.
            pos = np.cumsum([len(x) for x in paths])
            pos_segments.append(pos[:-1])
            pos_level.append(pos[-1])
    else:
        paths = cset.get_paths()
        if len(paths) > 0:
            all_paths.append(paths)
            # The last item in pos isn't needed for np.split and in fact causes
            # issues if we keep it because it will cause an extra empty array to be
            # returned.
            pos = np.cumsum([len(x) for x in paths])
            pos_segments.append(pos[:-1])
            pos_level.append(pos[-1])

    # As above the last item isn't needed
    pos_level = np.cumsum(pos_level)[:-1]

    # Stack all the segments into a single (n, 2) array
    vertices = [path.vertices for paths in all_paths for path in paths]
    if len(vertices) > 0:
        vertices = np.concatenate(vertices)
    else:
        return

    # Transform all coordinates in one go
    vertices = transform.transform(vertices)

    # Split up into levels again
    vertices = np.split(vertices, pos_level)

    # Now re-populate the segments in the line collections
    for ilevel, vert in enumerate(vertices):
        vert = np.split(vert, pos_segments[ilevel])
        for iseg, ivert in enumerate(vert):
            all_paths[ilevel][iseg].vertices = ivert
Take the input WCS and slices and return a sliced WCS for the transform and a mapping of world axes in the sliced WCS to the input WCS.
def apply_slices(wcs, slices): """ Take the input WCS and slices and return a sliced WCS for the transform and a mapping of world axes in the sliced WCS to the input WCS. """ if isinstance(wcs, SlicedLowLevelWCS): world_keep = list(wcs._world_keep) else: world_keep = list(range(wcs.world_n_dim)) # world_map is the index of the world axis in the input WCS for a given # axis in the transform_wcs world_map = list(range(wcs.world_n_dim)) transform_wcs = wcs invert_xy = False if slices is not None: wcs_slice = list(slices) wcs_slice[wcs_slice.index("x")] = slice(None) if "y" in slices: wcs_slice[wcs_slice.index("y")] = slice(None) invert_xy = slices.index("x") > slices.index("y") transform_wcs = SlicedLowLevelWCS(wcs, wcs_slice[::-1]) world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep) return transform_wcs, invert_xy, world_map
This test ensures that the format unit is updated and displayed for both the axis ticks and default axis labels.
def test_1d_plot_1d_wcs_format_unit(wave_wcs_1d): """ This test ensures that the format unit is updated and displayed for both the axis ticks and default axis labels. """ fig = plt.figure() ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d) (lines,) = ax.plot([10, 12, 14, 12, 10]) ax.coords[0].set_format_unit("nm") return fig
This WCS has an almost linear correlation between the pixel and world axes close to the reference pixel.
def spatial_wcs_2d_small_angle(): """ This WCS has an almost linear correlation between the pixel and world axes close to the reference pixel. """ wcs = WCS(naxis=2) wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"] wcs.wcs.crpix = [3.0] * 2 wcs.wcs.cdelt = [10 / 3600, 5 / 3600] wcs.wcs.crval = [0] * 2 wcs.wcs.set() return wcs
Test that a ``SlicedLowLevelWCS`` through a coupled 2D WCS plots as a line
correctly.
def test_1d_plot_1d_sliced_low_level_wcs(
    spatial_wcs_2d_small_angle, slices, bottom_axis
):
    """
    Test that a ``SlicedLowLevelWCS`` through a coupled 2D WCS plots as a line
    correctly.
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle[slices])

    (lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")

    # Draw to trigger rendering the ticks.
    plt.draw()

    assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
    return fig
When we plot a 1D slice through spatial axes, we want to put the axis which
actually changes on the bottom.

For example, with an aligned WCS and pixel grid, if you plot a longitude
slice through a latitude axis, you would end up with no ticks on the bottom
(the longitude doesn't change) and a set of latitude ticks on the top (the
latitude does change, but it is the correlated axis, not the one you are
actually plotting against).
def test_1d_plot_put_varying_axis_on_bottom_lon(
    spatial_wcs_2d_small_angle, slices, bottom_axis
):
    """
    When we plot a 1D slice through spatial axes, we want to put the axis
    which actually changes on the bottom.

    For example, with an aligned WCS and pixel grid, if you plot a longitude
    slice through a latitude axis, you would end up with no ticks on the
    bottom (the longitude doesn't change) and a set of latitude ticks on the
    top (the latitude does change, but it is the correlated axis, not the one
    you are actually plotting against).
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle, slices=slices)

    (lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")

    # Draw to trigger rendering the ticks.
    plt.draw()

    assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
    return fig
Regression test for https://github.com/astropy/astropy/issues/8004.
def test_simplify_labels_usetex(ignore_matplotlibrc, tmp_path): """Regression test for https://github.com/astropy/astropy/issues/8004.""" plt.rc("text", usetex=True) header = { "NAXIS": 2, "NAXIS1": 360, "NAXIS2": 180, "CRPIX1": 180.5, "CRPIX2": 90.5, "CRVAL1": 180.0, "CRVAL2": 0.0, "CDELT1": -2 * np.sqrt(2) / np.pi, "CDELT2": 2 * np.sqrt(2) / np.pi, "CTYPE1": "RA---MOL", "CTYPE2": "DEC--MOL", "RADESYS": "ICRS", } wcs = WCS(header) fig, ax = plt.subplots(subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs)) ax.set_xlim(-0.5, header["NAXIS1"] - 0.5) ax.set_ylim(-0.5, header["NAXIS2"] - 0.5) ax.coords[0].set_ticklabel(exclude_overlapping=True) ax.coords[1].set_ticklabel(exclude_overlapping=True) ax.coords[0].set_ticks(spacing=45 * u.deg) ax.coords[1].set_ticks(spacing=30 * u.deg) ax.grid() fig.savefig(tmp_path / "plot.png")
Test if ``axis.set_xlabel()`` calls the correct
``coords[i].set_axislabel()`` in a WCS plot.

Regression test for https://github.com/astropy/astropy/issues/10435.
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
    """Test if ``axis.set_xlabel()`` calls the correct
    ``coords[i].set_axislabel()`` in a WCS plot.

    Regression test for https://github.com/astropy/astropy/issues/10435.
    """
    labels = ["RA", "Declination"]
    header = {
        "NAXIS": 2,
        "NAXIS1": 360,
        "NAXIS2": 180,
        "CRPIX1": 180.5,
        "CRPIX2": 90.5,
        "CRVAL1": 180.0,
        "CRVAL2": 0.0,
        "CDELT1": -2 * np.sqrt(2) / np.pi,
        "CDELT2": 2 * np.sqrt(2) / np.pi,
        "CTYPE1": "RA---AIT",
        "CTYPE2": "DEC--AIT",
    }

    wcs = WCS(header)
    fig, ax = plt.subplots(subplot_kw=dict(frame_class=frame_class, projection=wcs))
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    assert ax.get_xlabel() == labels[0]
    assert ax.get_ylabel() == labels[1]
    for i in range(2):
        assert ax.coords[i].get_axislabel() == labels[i]
Test that the Matplotlib subtraction shorthand for composing and inverting transformations works.
def test_shorthand_inversion(): """ Test that the Matplotlib subtraction shorthand for composing and inverting transformations works. """ w1 = WCS(naxis=2) w1.wcs.ctype = ["RA---TAN", "DEC--TAN"] w1.wcs.crpix = [256.0, 256.0] w1.wcs.cdelt = [-0.05, 0.05] w1.wcs.crval = [120.0, -19.0] w2 = WCS(naxis=2) w2.wcs.ctype = ["RA---SIN", "DEC--SIN"] w2.wcs.crpix = [256.0, 256.0] w2.wcs.cdelt = [-0.05, 0.05] w2.wcs.crval = [235.0, +23.7] t1 = WCSWorld2PixelTransform(w1) t2 = WCSWorld2PixelTransform(w2) assert t1 - t2 == t1 + t2.inverted() assert t1 - t2 != t2.inverted() + t1 assert t1 - t1 == IdentityTransform()
The only configuration parameter needed at compile-time is how to specify a 64-bit signed integer. Python's ctypes module can get us that information. If we can't be absolutely certain, we default to "long long int", which is correct on most platforms (x86, x86_64). If we find platforms where this heuristic doesn't work, we may need to hardcode for them.
def determine_64_bit_int(): """ The only configuration parameter needed at compile-time is how to specify a 64-bit signed integer. Python's ctypes module can get us that information. If we can't be absolutely certain, we default to "long long int", which is correct on most platforms (x86, x86_64). If we find platforms where this heuristic doesn't work, we may need to hardcode for them. """ try: try: import ctypes except ImportError: raise ValueError() if ctypes.sizeof(ctypes.c_longlong) == 8: return "long long int" elif ctypes.sizeof(ctypes.c_long) == 8: return "long int" elif ctypes.sizeof(ctypes.c_int) == 8: return "int" else: raise ValueError() except ValueError: return "long long int"
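The heuristic can be checked interactively; on common LP64 systems (Linux and macOS on x86_64) both ``long`` and ``long long`` are 8 bytes, while on LLP64 Windows only ``long long`` is:

import ctypes

print(ctypes.sizeof(ctypes.c_longlong))  # 8 on all common platforms
print(ctypes.sizeof(ctypes.c_long))      # 8 on LP64, 4 on Windows (LLP64)
print(determine_64_bit_int())            # e.g. "long long int"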
Writes out the wcsconfig.h header with local configuration.
def write_wcsconfig_h(paths): """ Writes out the wcsconfig.h header with local configuration. """ h_file = io.StringIO() h_file.write( f""" /* The bundled version has WCSLIB_VERSION */ #define HAVE_WCSLIB_VERSION 1 /* WCSLIB library version number. */ #define WCSLIB_VERSION {WCSVERSION} /* 64-bit integer data type. */ #define WCSLIB_INT64 {determine_64_bit_int()} /* Windows needs some other defines to prevent inclusion of wcsset() which conflicts with wcslib's wcsset(). These need to be set on code that *uses* astropy.wcs, in addition to astropy.wcs itself. */ #if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__) #ifndef YY_NO_UNISTD_H #define YY_NO_UNISTD_H #endif #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef _NO_OLDNAMES #define _NO_OLDNAMES #endif #ifndef NO_OLDNAMES #define NO_OLDNAMES #endif #ifndef __STDC__ #define __STDC__ 1 #endif #endif """ ) content = h_file.getvalue().encode("ascii") for path in paths: write_if_different(path, content)
Add a new Stokes axis that is uncorrelated with any other axes.

Parameters
----------
wcs : `~astropy.wcs.WCS`
    The WCS to which the new axis is added.
add_before_ind : int
    Index of the WCS to insert the new Stokes axis in front of. To add at
    the end, use ``add_before_ind = wcs.wcs.naxis``; the beginning is at
    position 0.

Returns
-------
`~astropy.wcs.WCS`
    A new `~astropy.wcs.WCS` instance with an additional axis
def add_stokes_axis_to_wcs(wcs, add_before_ind):
    """
    Add a new Stokes axis that is uncorrelated with any other axes.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS to which the new axis is added.
    add_before_ind : int
        Index of the WCS to insert the new Stokes axis in front of. To add at
        the end, use ``add_before_ind = wcs.wcs.naxis``; the beginning is at
        position 0.

    Returns
    -------
    `~astropy.wcs.WCS`
        A new `~astropy.wcs.WCS` instance with an additional axis
    """
    inds = [i + 1 for i in range(wcs.wcs.naxis)]
    inds.insert(add_before_ind, 0)
    newwcs = wcs.sub(inds)
    newwcs.wcs.ctype[add_before_ind] = "STOKES"
    newwcs.wcs.cname[add_before_ind] = "STOKES"
    return newwcs
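A short usage sketch appending a Stokes axis after two celestial axes:

from astropy.wcs import WCS
from astropy.wcs.utils import add_stokes_axis_to_wcs

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]

# Insert the Stokes axis at the end (add_before_ind == wcs.wcs.naxis).
wcs3 = add_stokes_axis_to_wcs(wcs, 2)
print(wcs3.wcs.naxis)        # 3
print(list(wcs3.wcs.ctype))  # ['RA---TAN', 'DEC--TAN', 'STOKES']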
For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings >>> with custom_wcs_to_frame_mappings(my_function): ... wcs_to_celestial_frame(...)
def wcs_to_celestial_frame(wcs): """ For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings >>> with custom_wcs_to_frame_mappings(my_function): ... wcs_to_celestial_frame(...) """ for mapping_set in WCS_FRAME_MAPPINGS: for func in mapping_set: frame = func(wcs) if frame is not None: return frame raise ValueError( "Could not determine celestial frame corresponding to the specified WCS object" )
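Basic usage is simpler than the extension mechanism described above; a sketch with an FK5 header (the equinox value is arbitrary):

from astropy.wcs import WCS
from astropy.wcs.utils import wcs_to_celestial_frame

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.radesys = "FK5"
wcs.wcs.equinox = 2010.0

# The built-in mappings recognize the RADESYS/EQUINOX combination.
frame = wcs_to_celestial_frame(wcs)
print(frame)  # expected: <FK5 Frame (equinox=J2010.000)>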
For a given coordinate frame, return the corresponding WCS object. Note that the returned WCS object has only the elements corresponding to coordinate frames set (e.g. ctype, equinox, radesys). Parameters ---------- frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance for which to find the WCS projection : str Projection code to use in ctype, if applicable Returns ------- wcs : :class:`~astropy.wcs.WCS` instance The corresponding WCS object Examples -------- :: >>> from astropy.wcs.utils import celestial_frame_to_wcs >>> from astropy.coordinates import FK5 >>> frame = FK5(equinox='J2010') >>> wcs = celestial_frame_to_wcs(frame) >>> wcs.to_header() WCSAXES = 2 / Number of coordinate axes CRPIX1 = 0.0 / Pixel coordinate of reference point CRPIX2 = 0.0 / Pixel coordinate of reference point CDELT1 = 1.0 / [deg] Coordinate increment at reference point CDELT2 = 1.0 / [deg] Coordinate increment at reference point CUNIT1 = 'deg' / Units of coordinate increment and value CUNIT2 = 'deg' / Units of coordinate increment and value CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection CRVAL1 = 0.0 / [deg] Coordinate value at reference point CRVAL2 = 0.0 / [deg] Coordinate value at reference point LONPOLE = 180.0 / [deg] Native longitude of celestial pole LATPOLE = 0.0 / [deg] Native latitude of celestial pole RADESYS = 'FK5' / Equatorial coordinate system EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance and a projection (given as a string) and should return either a WCS instance, or `None` if the WCS could not be determined. You can register this function temporarily with:: >>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings >>> with custom_frame_to_wcs_mappings(my_function): ... celestial_frame_to_wcs(...)
def celestial_frame_to_wcs(frame, projection="TAN"):
    """
    For a given coordinate frame, return the corresponding WCS object.

    Note that the returned WCS object has only the elements corresponding to
    coordinate frames set (e.g. ctype, equinox, radesys).

    Parameters
    ----------
    frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
        An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
        subclass for which to find the WCS.
    projection : str
        Projection code to use in ctype, if applicable.

    Returns
    -------
    wcs : :class:`~astropy.wcs.WCS` instance
        The corresponding WCS object.

    Examples
    --------
    ::

        >>> from astropy.wcs.utils import celestial_frame_to_wcs
        >>> from astropy.coordinates import FK5
        >>> frame = FK5(equinox='J2010')
        >>> wcs = celestial_frame_to_wcs(frame)
        >>> wcs.to_header()
        WCSAXES =                    2 / Number of coordinate axes
        CRPIX1  =                  0.0 / Pixel coordinate of reference point
        CRPIX2  =                  0.0 / Pixel coordinate of reference point
        CDELT1  =                  1.0 / [deg] Coordinate increment at reference point
        CDELT2  =                  1.0 / [deg] Coordinate increment at reference point
        CUNIT1  = 'deg'                / Units of coordinate increment and value
        CUNIT2  = 'deg'                / Units of coordinate increment and value
        CTYPE1  = 'RA---TAN'           / Right ascension, gnomonic projection
        CTYPE2  = 'DEC--TAN'           / Declination, gnomonic projection
        CRVAL1  =                  0.0 / [deg] Coordinate value at reference point
        CRVAL2  =                  0.0 / [deg] Coordinate value at reference point
        LONPOLE =                180.0 / [deg] Native longitude of celestial pole
        LATPOLE =                  0.0 / [deg] Native latitude of celestial pole
        RADESYS = 'FK5'                / Equatorial coordinate system
        EQUINOX =               2010.0 / [yr] Equinox of equatorial coordinates

    Notes
    -----
    To extend this function to frames not defined in astropy.coordinates, you
    can write your own function which should take a
    :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance and a
    projection (given as a string) and should return either a WCS instance, or
    `None` if the WCS could not be determined. You can register this function
    temporarily with::

        >>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
        >>> with custom_frame_to_wcs_mappings(my_function):
        ...     celestial_frame_to_wcs(...)

    """
    for mapping_set in FRAME_WCS_MAPPINGS:
        for func in mapping_set:
            wcs = func(frame, projection=projection)
            if wcs is not None:
                return wcs
    raise ValueError(
        "Could not determine WCS corresponding to the specified coordinate frame."
    )
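A complementary sketch (again illustrative, not from the source): the reverse mapping fills in the frame-dependent header elements for a non-equatorial frame.

from astropy.coordinates import Galactic
from astropy.wcs.utils import celestial_frame_to_wcs

# The Galactic frame maps to GLON/GLAT ctypes, with the requested
# projection code appended (plate carree here).
wcs = celestial_frame_to_wcs(Galactic(), projection="CAR")
print(wcs.wcs.ctype)  # ['GLON-CAR', 'GLAT-CAR']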
For a WCS, return the pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061
<https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

.. note::
    This function is concerned **only** with the transformation
    "image plane" -> "projection plane" and **not** with the transformation
    "celestial sphere" -> "projection plane" -> "image plane". Therefore,
    this function ignores distortions arising from the non-linear nature
    of most projections.

.. note::
    In order to compute the scales corresponding to celestial axes only,
    make sure that the input `~astropy.wcs.WCS` object contains celestial
    axes only, e.g., by passing in the `~astropy.wcs.WCS.celestial` WCS
    object.

Parameters
----------
wcs : `~astropy.wcs.WCS`
    A world coordinate system object.

Returns
-------
scale : ndarray
    A vector (`~numpy.ndarray`) of projection plane increments
    corresponding to each pixel side (axis). The units of the returned
    results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
    `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for the
    celestial WCS and can be obtained by inspecting the value of the
    `~astropy.wcs.Wcsprm.cunit` property of the input `~astropy.wcs.WCS`
    object.

See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
def proj_plane_pixel_scales(wcs):
    """
    For a WCS, return the pixel scales along each axis of the image pixel at
    the ``CRPIX`` location once it is projected onto the
    "plane of intermediate world coordinates" as defined in
    `Greisen & Calabretta 2002, A&A, 395, 1061
    <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    .. note::
        This function is concerned **only** with the transformation
        "image plane" -> "projection plane" and **not** with the transformation
        "celestial sphere" -> "projection plane" -> "image plane". Therefore,
        this function ignores distortions arising from the non-linear nature
        of most projections.

    .. note::
        In order to compute the scales corresponding to celestial axes only,
        make sure that the input `~astropy.wcs.WCS` object contains celestial
        axes only, e.g., by passing in the `~astropy.wcs.WCS.celestial` WCS
        object.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        A world coordinate system object.

    Returns
    -------
    scale : ndarray
        A vector (`~numpy.ndarray`) of projection plane increments
        corresponding to each pixel side (axis). The units of the returned
        results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
        `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for the
        celestial WCS and can be obtained by inspecting the value of the
        `~astropy.wcs.Wcsprm.cunit` property of the input `~astropy.wcs.WCS`
        object.

    See Also
    --------
    astropy.wcs.utils.proj_plane_pixel_area
    """
    return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
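A hedged sketch of the common case (the numbers are made up): with a diagonal pixel matrix and no rotation, the projection-plane scales reduce to the absolute value of ``CDELT`` along each axis, in the units of ``cunit``.

from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.cdelt = [-0.0002777, 0.0002777]  # roughly 1 arcsec pixels, in deg

# Each entry is the length of one pixel side in the projection plane.
print(proj_plane_pixel_scales(wcs))  # [0.0002777 0.0002777]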
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), return the
pixel area of the image pixel at the ``CRPIX`` location once it is
projected onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061
<https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

.. note::
    This function is concerned **only** with the transformation
    "image plane" -> "projection plane" and **not** with the transformation
    "celestial sphere" -> "projection plane" -> "image plane". Therefore,
    this function ignores distortions arising from the non-linear nature
    of most projections.

.. note::
    In order to compute the area of pixels corresponding to celestial
    axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
    object of the input ``wcs``. This is different from the
    `~astropy.wcs.utils.proj_plane_pixel_scales` function that computes
    the scales for the axes of the input WCS itself.

Parameters
----------
wcs : `~astropy.wcs.WCS`
    A world coordinate system object.

Returns
-------
area : float
    Area (in the projection plane) of the pixel at the ``CRPIX`` location.
    The units of the returned result are the same as the units of the
    `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
    `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be obtained by
    inspecting the value of the `~astropy.wcs.Wcsprm.cunit` property of
    the `~astropy.wcs.WCS.celestial` WCS object.

Raises
------
ValueError
    Pixel area is defined only for 2D pixels. Most likely the
    `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
    WCS is not a square matrix of second order.

Notes
-----
Depending on the application, the square root of the pixel area can be
used to represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.

See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
def proj_plane_pixel_area(wcs):
    """
    For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), return the
    pixel area of the image pixel at the ``CRPIX`` location once it is
    projected onto the "plane of intermediate world coordinates" as defined in
    `Greisen & Calabretta 2002, A&A, 395, 1061
    <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    .. note::
        This function is concerned **only** with the transformation
        "image plane" -> "projection plane" and **not** with the transformation
        "celestial sphere" -> "projection plane" -> "image plane". Therefore,
        this function ignores distortions arising from the non-linear nature
        of most projections.

    .. note::
        In order to compute the area of pixels corresponding to celestial
        axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
        object of the input ``wcs``. This is different from the
        `~astropy.wcs.utils.proj_plane_pixel_scales` function that computes
        the scales for the axes of the input WCS itself.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        A world coordinate system object.

    Returns
    -------
    area : float
        Area (in the projection plane) of the pixel at the ``CRPIX`` location.
        The units of the returned result are the same as the units of the
        `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
        `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be obtained by
        inspecting the value of the `~astropy.wcs.Wcsprm.cunit` property of
        the `~astropy.wcs.WCS.celestial` WCS object.

    Raises
    ------
    ValueError
        Pixel area is defined only for 2D pixels. Most likely the
        `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
        WCS is not a square matrix of second order.

    Notes
    -----
    Depending on the application, the square root of the pixel area can be
    used to represent a single pixel scale of an equivalent square pixel
    whose area is equal to the area of a generally non-square pixel.

    See Also
    --------
    astropy.wcs.utils.proj_plane_pixel_scales
    """
    psm = wcs.celestial.pixel_scale_matrix
    if psm.shape != (2, 2):
        raise ValueError("Pixel area is defined only for 2D pixels.")
    return np.abs(np.linalg.det(psm))
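An illustrative sketch (arbitrary values, not from the source): because the area is the absolute determinant of the pixel scale matrix, a pure rotation of the pixel grid leaves it unchanged.

import numpy as np

from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.cdelt = [0.001, 0.001]  # deg/pixel
theta = np.deg2rad(30.0)  # rotate the pixel grid by 30 degrees
wcs.wcs.pc = [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]

# |det| of a rotation matrix is 1, so the area is just 0.001**2.
print(proj_plane_pixel_area(wcs))  # ~1e-06 (deg**2 per pixel)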
For a WCS, return `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates" as
defined in `Greisen & Calabretta 2002, A&A, 395, 1061
<https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. Return `True`
if the transformation from image (detector) coordinates to the focal plane
coordinates is non-orthogonal or if the WCS contains non-linear (e.g., SIP)
distortions.

.. note::
    Since this function is concerned **only** with the transformation
    "image plane" -> "focal plane" and **not** with the transformation
    "celestial sphere" -> "focal plane" -> "image plane", this function
    ignores distortions arising from the non-linear nature of most
    projections.

Let's denote by *C* either the original or the reconstructed (from ``PC``
and ``CDELT``) CD matrix. `is_proj_plane_distorted` verifies that the
transformation from image (detector) coordinates to the focal plane
coordinates is orthogonal using the following check:

.. math::
    \left \| \frac{C \cdot C^{\mathrm{T}}}{|\det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .

Parameters
----------
wcs : `~astropy.wcs.WCS`
    World coordinate system object.

maxerr : float, optional
    Accuracy to which the CD matrix, **normalized** such that
    :math:`|\det(CD)| = 1`, should be close to being an orthogonal matrix
    as described in the above equation (see :math:`\epsilon`).

Returns
-------
distorted : bool
    Returns `True` if the focal (projection) plane is distorted and
    `False` otherwise.
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
    r"""
    For a WCS, return `False` if square image (detector) pixels stay square
    when projected onto the "plane of intermediate world coordinates" as
    defined in `Greisen & Calabretta 2002, A&A, 395, 1061
    <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. Return `True`
    if the transformation from image (detector) coordinates to the focal plane
    coordinates is non-orthogonal or if the WCS contains non-linear (e.g., SIP)
    distortions.

    .. note::
        Since this function is concerned **only** with the transformation
        "image plane" -> "focal plane" and **not** with the transformation
        "celestial sphere" -> "focal plane" -> "image plane", this function
        ignores distortions arising from the non-linear nature of most
        projections.

    Let's denote by *C* either the original or the reconstructed (from ``PC``
    and ``CDELT``) CD matrix. `is_proj_plane_distorted` verifies that the
    transformation from image (detector) coordinates to the focal plane
    coordinates is orthogonal using the following check:

    .. math::
        \left \| \frac{C \cdot C^{\mathrm{T}}}{|\det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        World coordinate system object.

    maxerr : float, optional
        Accuracy to which the CD matrix, **normalized** such that
        :math:`|\det(CD)| = 1`, should be close to being an orthogonal matrix
        as described in the above equation (see :math:`\epsilon`).

    Returns
    -------
    distorted : bool
        Returns `True` if the focal (projection) plane is distorted and
        `False` otherwise.
    """
    cwcs = wcs.celestial
    return not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(
        cwcs
    )
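A sketch of the orthogonality check in action (the matrices are arbitrary, not from the source): a plain diagonal pixel matrix passes, while a skewed pixel grid fails.

from astropy.wcs import WCS
from astropy.wcs.utils import is_proj_plane_distorted

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.cdelt = [0.001, 0.001]

# Identity PC matrix: C @ C.T / |det(C)| == I, so no distortion.
print(is_proj_plane_distorted(wcs))  # False

# Add skew: square pixels no longer stay square in the focal plane.
wcs.wcs.pc = [[1.0, 0.4], [0.0, 1.0]]
print(is_proj_plane_distorted(wcs))  # True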
Calculate the pixel scale along each axis of a non-celestial WCS, for
example one with mixed spectral and spatial axes.

Parameters
----------
inwcs : `~astropy.wcs.WCS`
    The world coordinate system object.

Returns
-------
scale : `~astropy.units.Quantity`
    The pixel scale along each axis, in degrees.
def non_celestial_pixel_scales(inwcs):
    """
    Calculate the pixel scale along each axis of a non-celestial WCS, for
    example one with mixed spectral and spatial axes.

    Parameters
    ----------
    inwcs : `~astropy.wcs.WCS`
        The world coordinate system object.

    Returns
    -------
    scale : `~astropy.units.Quantity`
        The pixel scale along each axis, in degrees.
    """
    if inwcs.is_celestial:
        raise ValueError("WCS is celestial, use proj_plane_pixel_scales instead")

    pccd = inwcs.pixel_scale_matrix

    if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
        return np.abs(np.diagonal(pccd)) * u.deg
    else:
        raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
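A hedged sketch with a mixed spatial/spectral WCS (the axis choices and values are illustrative): only diagonal pixel matrices are accepted, and note that the result is returned as a `~astropy.units.Quantity` tagged in degrees for every axis.

from astropy.wcs import WCS
from astropy.wcs.utils import non_celestial_pixel_scales

# One spatial axis and one spectral axis: not a celestial (lon, lat) pair.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "FREQ"]
wcs.wcs.cdelt = [0.1, 0.5]

# Diagonal pixel matrix, so the scales are just |cdelt| per axis.
print(non_celestial_pixel_scales(wcs))  # [0.1 0.5] deg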
`True` if the WCS contains any SIP or image distortion components.
def _has_distortion(wcs):
    """
    `True` if the WCS contains any SIP or image distortion components.
    """
    return any(
        getattr(wcs, dist_attr) is not None
        for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
    )
Convert a set of SkyCoord coordinates into pixels.

Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
    The coordinates to convert.
wcs : `~astropy.wcs.WCS`
    The WCS transformation to use.
origin : int
    Whether to return 0- or 1-based pixel coordinates.
mode : 'all' or 'wcs'
    Whether to do the transformation including distortions (``'all'``) or
    including only the core WCS transformation (``'wcs'``).

Returns
-------
xp, yp : `numpy.ndarray`
    The pixel coordinates.

See Also
--------
astropy.coordinates.SkyCoord.to_pixel
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
    """
    Convert a set of SkyCoord coordinates into pixels.

    Parameters
    ----------
    coords : `~astropy.coordinates.SkyCoord`
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether to return 0- or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).

    Returns
    -------
    xp, yp : `numpy.ndarray`
        The pixel coordinates.

    See Also
    --------
    astropy.coordinates.SkyCoord.to_pixel
    """
    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])

    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS needs
    xw_unit = u.Unit(wcs.wcs.cunit[0])
    yw_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert positions to frame
    coords = coords.transform_to(frame)

    # Extract longitude and latitude. We first try to use lon/lat directly,
    # but if the representation is not spherical or unit spherical this will
    # fail. We should then force the use of the unit spherical
    # representation. We don't do that directly to make sure that we preserve
    # custom lon/lat representations if available.
    try:
        lon = coords.data.lon.to(xw_unit)
        lat = coords.data.lat.to(yw_unit)
    except AttributeError:
        lon = coords.spherical.lon.to(xw_unit)
        lat = coords.spherical.lat.to(yw_unit)

    # Convert to pixel coordinates
    if mode == "all":
        xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
    elif mode == "wcs":
        xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")

    return xp, yp
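An end-to-end sketch (the WCS values are invented for illustration): a coordinate at the reference value lands at ``CRPIX`` minus one with the default 0-based ``origin``.

from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import skycoord_to_pixel

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [10.0, 20.0]  # deg
wcs.wcs.crpix = [50.0, 50.0]  # 1-based FITS convention
wcs.wcs.cdelt = [-0.001, 0.001]

# No RADESYS/EQUINOX set, so the celestial frame defaults to ICRS.
coord = SkyCoord(ra=10.0, dec=20.0, unit="deg")
xp, yp = skycoord_to_pixel(coord, wcs)
print(xp, yp)  # ~49.0 ~49.0 (0-based)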
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.

Parameters
----------
xp, yp : float or ndarray
    The pixel coordinates to convert.
wcs : `~astropy.wcs.WCS`
    The WCS transformation to use.
origin : int
    Whether the input pixel coordinates are 0- or 1-based.
mode : 'all' or 'wcs'
    Whether to do the transformation including distortions (``'all'``) or
    including only the core WCS transformation (``'wcs'``).
cls : class or None
    The class of object to create. Should be a
    `~astropy.coordinates.SkyCoord` subclass. If None, defaults to
    `~astropy.coordinates.SkyCoord`.

Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
    The celestial coordinates, as an instance of the ``cls`` type.

See Also
--------
astropy.coordinates.SkyCoord.from_pixel
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
    """
    Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
    coordinate.

    Parameters
    ----------
    xp, yp : float or ndarray
        The pixel coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether the input pixel coordinates are 0- or 1-based.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).
    cls : class or None
        The class of object to create. Should be a
        `~astropy.coordinates.SkyCoord` subclass. If None, defaults to
        `~astropy.coordinates.SkyCoord`.

    Returns
    -------
    coords : `~astropy.coordinates.SkyCoord` subclass
        The celestial coordinates, as an instance of the ``cls`` type.

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """
    # Import astropy.coordinates here to avoid circular imports; this is also
    # why the default for ``cls`` cannot simply be set to SkyCoord in the
    # function signature.
    from astropy.coordinates import SkyCoord, UnitSphericalRepresentation

    if cls is None:
        cls = SkyCoord

    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])

    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS gives
    lon_unit = u.Unit(wcs.wcs.cunit[0])
    lat_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert pixel coordinates to celestial coordinates
    if mode == "all":
        lon, lat = wcs.all_pix2world(xp, yp, origin)
    elif mode == "wcs":
        lon, lat = wcs.wcs_pix2world(xp, yp, origin)
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")

    # Add units to longitude/latitude
    lon = lon * lon_unit
    lat = lat * lat_unit

    # Create a SkyCoord-like object
    data = UnitSphericalRepresentation(lon=lon, lat=lat)
    coords = cls(frame.realize_frame(data))

    return coords
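And the round trip back, under the same invented WCS as the previous sketch:

from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord, skycoord_to_pixel

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [10.0, 20.0]
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.cdelt = [-0.001, 0.001]

coord = pixel_to_skycoord(100.0, 120.0, wcs)  # an ICRS SkyCoord
xp, yp = skycoord_to_pixel(coord, wcs)
print(xp, yp)  # recovers ~100.0 ~120.0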