# Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
from collections import OrderedDict
from operator import index as operator_index
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
# Ensure that the row index is a valid index (int)
index = operator_index(index)
n = len(table)
if index < -n or index >= n:
raise IndexError('index {0} out of range for table with length {1}'
.format(index, len(table)))
# Finally, ensure the index is positive [#8422] and set Row attributes
self._index = index % n
self._table = table
def __getitem__(self, item):
try:
# Try the most common use case of accessing a single column in the Row.
# Bypass the TableColumns __getitem__ since that does more testing
# and allows a list of tuple or str, which is not the right thing here.
out = OrderedDict.__getitem__(self._table.columns, item)[self._index]
except (KeyError, TypeError):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
# This is only to raise an exception
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
        row indexing of a pure numpy structured array or masked array. This
        method is slow, so avoid it where possible.
Returns
-------
void_row : np.void (unmasked) or np.ma.mvoid (masked)
Copy of row values
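        Examples
        --------
        An illustrative sketch (a small table like the one in the class
        docstring; not from the original docs)::

            >>> from astropy.table import Table
            >>> t = Table([(1, 2), (3, 4)], names=('a', 'b'))
            >>> row_void = t[1].as_void()
            >>> row_void['a']
            2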
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
# The logic here is a little complicated to work around
# bug in numpy < 1.8 (numpy/numpy#483). Need to build up
# a np.ma.mvoid object by hand.
from .table import descr
# Make np.void version of masks. Use the table dtype but
# substitute bool for data type
masks = tuple(col.mask[index] if hasattr(col, 'mask') else False
for col in cols)
descrs = (descr(col) for col in cols)
mask_dtypes = [(name, bool, shape) for name, type_, shape in descrs]
row_mask = np.array([masks], dtype=mask_dtypes)[0]
# Make np.void version of values, and then the final mvoid row
row_vals = np.array([vals], dtype=self.dtype)[0]
void_row = np.ma.mvoid(data=row_vals, mask=row_mask)
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index:index + 1]
descr_vals = [self.__class__.__name__,
'index={0}'.format(self.index)]
if table.masked:
descr_vals.append('masked=True')
return table._base_repr_(html, descr_vals, max_width=-1,
tableid='table{0}'.format(id(self._table)))
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode('utf-8')
collections.abc.Sequence.register(Row)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import platform
import warnings
import numpy as np
from .index import get_index_by_names
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['TableGroups', 'ColumnGroups']
def table_group_by(table, keys):
# index copies are unnecessary and slow down _table_group_by
with table.index_mode('discard_on_copy'):
return _table_group_by(table, keys)
def _table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
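    Examples
    --------
    A minimal sketch (hypothetical data; normally reached via the public
    ``table.group_by(keys)`` entry point)::

        t = Table({'key': ['b', 'a', 'b'], 'x': [1, 2, 3]})
        grouped = _table_group_by(t, 'key')
        # grouped is sorted by key:
        #   grouped['key']         -> ['a', 'b', 'b']
        #   grouped.groups.indices -> array([0, 1, 3])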
"""
from .table import Table
from .serialize import represent_mixins_as_columns
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, str):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError('Table does not have key column {0!r}'.format(name))
if table.masked and np.any(table[name].mask):
raise ValueError('Missing values in key column {0!r} are not allowed'.format(name))
# Make a column slice of the table without copying
table_keys = table.__class__([table[key] for key in keys], copy=False)
# If available get a pre-existing index for these columns
table_index = get_index_by_names(table, keys)
grouped_by_table_cols = True
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError('Input keys array length {0} does not match table length {1}'
.format(len(table_keys), len(table)))
table_index = None
grouped_by_table_cols = False
else:
raise TypeError('Keys input must be string, list, tuple, Table or numpy array, but got {0}'
.format(type(keys)))
    # If there is not already an available index and table_keys is a Table then ensure
    # that all cols (including mixins) are in a form that can be sorted with the code below.
if not table_index and isinstance(table_keys, Table):
table_keys = represent_mixins_as_columns(table_keys)
# Get the argsort index `idx_sort`, accounting for particulars
try:
# take advantage of index internal sort if possible
if table_index is not None:
idx_sort = table_index.sorted_data()
else:
idx_sort = table_keys.argsort(kind='mergesort')
stable_sort = True
except TypeError:
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys.argsort()
stable_sort = platform.system() not in ('Darwin', 'Windows')
# Finally do the actual sort of table_keys values
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in zip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta['grouped_by_table_cols'] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
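    Examples
    --------
    A minimal sketch (hypothetical data; normally reached via
    ``column.group_by(keys)``)::

        import numpy as np
        from astropy.table import Column

        col = Column([1, 2, 3, 4])
        grouped = column_group_by(col, np.array(['a', 'b', 'a', 'b']))
        # grouped data is sorted by key -> [1, 3, 2, 4]
        # grouped.groups.indices       -> array([0, 2, 4])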
"""
from .table import Table
from .serialize import represent_mixins_as_columns
if isinstance(keys, Table):
keys = represent_mixins_as_columns(keys)
keys = keys.as_array()
if not isinstance(keys, np.ndarray):
raise TypeError('Keys input must be numpy array, but got {0}'
.format(type(keys)))
if len(keys) != len(column):
raise ValueError('Input keys array length {0} does not match column length {1}'
.format(len(keys), len(column)))
idx_sort = keys.argsort()
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups:
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
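    These are illustrated below (a hypothetical table, using the public
    ``Table.group_by`` entry point)::

        from astropy.table import Table

        tg = Table({'k': ['a', 'a', 'b'], 'x': [1, 2, 3]}).group_by('k')
        tg.groups.indices   # array([0, 2, 3]) -- group boundaries
        tg.groups.keys      # one row per group ('a' and 'b')
        tg.groups[0]        # first group as a sub-table (rows 0 and 1)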
"""
@property
def parent(self):
return self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, (int, np.integer)):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception:
raise TypeError('Index item for groups attribute must be a slice, '
'numpy mask or int array')
mask = np.zeros(len(parent), dtype=bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in zip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return '<{0} indices={1}>'.format(self.__class__.__name__, self.indices)
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
from .column import MaskedColumn
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, 'reduceat')
sum_case = func is np.sum
mean_case = func is np.mean
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0: i1]) for i0, i1 in zip(i0s, i1s)])
except Exception:
raise TypeError("Cannot aggregate column '{0}' with type '{1}'"
.format(par_col.info.name,
par_col.info.dtype))
out = par_col.__class__(data=vals,
name=par_col.info.name,
description=par_col.info.description,
unit=par_col.info.unit,
format=par_col.info.format,
meta=par_col.info.meta)
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
        out : Column
            New column containing only the groups for which ``func``
            returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, 'meta', {}).get('grouped_by_table_cols', False)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
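        Examples
        --------
        A minimal sketch (hypothetical grouped table, not from the
        original docs)::

            import numpy as np
            from astropy.table import Table

            tg = Table({'key': ['a', 'a', 'b'], 'x': [1, 2, 3]}).group_by('key')
            agg = tg.groups.aggregate(np.sum)
            # agg['key'] -> ['a', 'b']  (first value in each group)
            # agg['x']   -> [3, 3]      (np.sum within each group)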
"""
i0s, i1s = self.indices[:-1], self.indices[1:]
out_cols = []
parent_table = self.parent_table
for col in parent_table.columns.values():
# For key columns just pick off first in each group since they are identical
if col.info.name in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.groups.aggregate(func)
except TypeError as err:
warnings.warn(str(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
        out : Table
            New table containing only the groups for which ``func``
            returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import TableIndices, TableLoc, TableILoc, TableLocIndices
import sys
from collections import OrderedDict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy import log
from astropy.units import Quantity, QuantityInfo
from astropy.utils import isiterable, ShapedLikeNDArray
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData
from astropy.utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyDeprecationWarning, NoValue
from astropy.io.registry import UnifiedReadWriteMethod
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from .connect import TableRead, TableWrite
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
__doctest_requires__ = {'*pandas': ['pandas']}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or `None`
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
        Include a header row for column dtypes. Default is False.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
        Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
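    For example (illustrative; the exact dtype is platform-dependent)::

        from astropy.table import Column

        descr(Column(name='a', data=[1, 2]))  # -> ('a', dtype('int64'), ())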
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
    cols : dict, list, tuple, optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableReadWrite:
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
info = self
info._parent_cls = owner_cls
else:
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
info._parent = instance
return info
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data,
making use of a `numpy` structured array internally to store the data
values. A key enhancement provided by the `~astropy.table.Table` class is
the ability to easily modify the structure of the table by adding or
removing columns, or adding new rows of data. In addition table and column
metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: http://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
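    Examples
    --------
    A minimal construction sketch (hypothetical data)::

        from astropy.table import Table

        t = Table({'a': [1, 2], 'b': [3.0, 4.0]}, meta={'origin': 'example'})
        t.colnames   # ['a', 'b']
        t['a'][0]    # 1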
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
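        Examples
        --------
        An illustrative sketch (hypothetical two-column table)::

            t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
            arr = t.as_array()
            # arr is a structured ndarray with fields 'a' and 'b'
            arr['a']                  # array([1, 2])
            t.as_array(names=['b'])   # structured array with only field 'b'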
"""
empty_init = ma.empty if self.masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
        if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
                # table init_func() is called. But for table input the table meta
                # gets a shallow copy here if copy=False because later a direct object ref
                # is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
return
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
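        Examples
        --------
        A minimal sketch (hypothetical masked table)::

            t = Table({'a': [1, 2, 3]}, masked=True)
            t['a'].mask = [False, True, False]
            t.filled(-1)['a']   # Column with values [1, -1, 3]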
"""
if self.masked:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [col.filled(fill_value) for col in self.columns.values()]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, FastRBT, and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
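        Examples
        --------
        An illustrative sketch (hypothetical table)::

            t = Table({'a': [3, 1, 2]})
            t.add_index('a')   # first index, so it becomes the primary key
            t.loc[2]           # row(s) with a == 2, found via the index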
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
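        Examples
        --------
        A sketch of 'freeze' mode (hypothetical indexed table ``t``)::

            with t.index_mode('freeze'):
                t['a'][0] = 10   # indices are not updated inside the block
            # on exit, indices refresh themselves from the column values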
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
        if len(names) != n_cols or len(dtype) != n_cols:
            raise ValueError(
                'Arguments "names" and "dtype" must match number of columns')
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
if (isinstance(data[0], OrderedDict) and
set(data[0].keys()) == names_from_data):
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (isinstance(col, np.ndarray) and len(col.dtype) > 1 and
not self._add_as_mixin_column(col)):
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) > 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError('Cannot have None for column name')
if len(set(names)) != len(names):
raise ValueError('Duplicate column names')
columns = table.TableColumns((name, col) for name, col in zip(names, cols))
for col in cols:
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = '<i>{0}</i>\n'.format(xml_escape(descr))
else:
descr = '<{0}>\n'.format(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
        # Is it a mixin but not a Quantity (which gets converted to Column with
        # unit set)?
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(max_lines, max_width, show_name,
show_unit, show_dtype, align)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.info.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(max_lines, max_width, show_name,
show_unit, show_dtype, html, tableid,
align, tableclass)
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin (unless already a valid
# mixin class).
if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and
not self._add_as_mixin_column(value)):
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
# has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np.broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np.broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray)) and
all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray)) and
np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names and
all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(data=['x', 'y'])
>>> t.add_column(col_c)
>>> t.add_column(col_c, name='c')
>>> col_b = Column(name='b', data=[1.1, 1.2])
>>> t.add_column(col_b, name='d')
>>> print(t)
a b col2 c d
--- --- ---- --- ---
1 0.1 x x 1.1
2 0.2 y y 1.2
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
if name is not None:
name = (name,)
self.add_columns([col], [index], name, copy=copy, rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_a = Column(data=['x', 'y'])
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([col_a, col_b])
>>> t.add_columns([col_a, col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
x u x u
y v y v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
for i, (col, name) in enumerate(zip(cols, names)):
if name is None:
if col.info.name is not None:
continue
name = 'col{}'.format(i + len(self.columns))
if col.info.parent_table is not None:
col = col_copy(col)
col.info.name = name
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
if col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
while col.info.name in existing_names:
# Iterate until a unique name is found
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
encode_decode_func : callable
Function used to re-encode the column data (e.g. ``np.char.encode``
or ``np.char.decode``) when a direct dtype conversion fails
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, 'utf-8'))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
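Examples
--------
A short sketch: the column dtype kind flips from bytes ('S') to
unicode ('U')::
>>> t = Table([[b'abc', b'de']], names=('s',))
>>> t['s'].dtype.kind
'S'
>>> t.convert_bytestring_to_unicode()
>>> t['s'].dtype.kind
'U'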
"""
self._convert_string_dtype('S', 'U', np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype('U', 'S', np.char.encode)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
'''
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
'''
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError("input 'new_names' must be a tuple or a list of column names")
if len(names) != len(new_names):
raise ValueError("input 'names' and 'new_names' list arguments must be the same length")
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
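Examples
--------
A minimal sketch inserting a new row before index 1::
>>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
>>> t.insert_row(1, [9, 8])
>>> t['a'].tolist()
[1, 9, 2]
>>> t['b'].tolist()
[4, 8, 5]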
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and vals is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val, axis=0)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
.format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
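Examples
--------
A small sketch (``tolist`` is used only to keep the output
platform-independent)::
>>> t = Table([[3, 1, 2]], names=('a',))
>>> t.argsort('a').tolist()
[1, 2, 0]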
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self.as_array(names=keys)
else:
data = self.as_array()
idx = data.argsort(**kwargs)
if reverse:
return idx[::-1]
return idx
def sort(self, keys=None, reverse=False):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel', in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
if reverse:
indexes = indexes[::-1]
sort_index = get_index(self, names=keys)
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
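Examples
--------
A sketch showing that the default copies the underlying data::
>>> t = Table([[1, 2, 3]], names=('a',))
>>> t2 = t.copy()
>>> t2['a'][0] = 99
>>> int(t['a'][0])  # original is unchanged
1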
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
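Examples
--------
A minimal sketch grouping on a single key column::
>>> t = Table([['b', 'a', 'b'], [1, 2, 3]], names=('key', 'val'))
>>> tg = t.group_by('key')
>>> len(tg.groups)
2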
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify the DataFrame index mode (see above)
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 00:00:03
2002-01-01 2.0 6.0 8.0 00:03:20
"""
from pandas import DataFrame
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError('index must be None, False, True or a table '
'column name')
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from . import serialize
from astropy.utils.data_info import MixinInfo, serialize_context_as
from astropy.time import Time, TimeDelta
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, Time)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype('timedelta64[ns]')
nat = np.timedelta64('NaT')
else:
new_col = col.datetime64.copy()
nat = np.datetime64('NaT')
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items()
if (getattr(col, 'ndim', 1) > 1)]
if badcols:
raise ValueError(
"Cannot convert a table with multi-dimensional columns to a "
"pandas DataFrame. Offending columns are: {}".format(badcols))
out = OrderedDict()
for name, column in tbl.columns.items():
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
warnings.warn(
"converted column '{}' from integer to float".format(
name), TableReplaceWarning, stacklevel=3)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
kwargs = {'index': out.pop(index)} if index else {}
return DataFrame(out, **kwargs)
@classmethod
def from_pandas(cls, dataframe, index=False):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 00:00:01 3.0
1 2002-01-01 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
object object float64
----------------------- ------ -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or 'index'
while index_name in names:
index_name = '_' + index_name + '_'
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
for name, column, data, mask in zip(names, columns, datas, masks):
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == 'M':
from astropy.time import Time
out[name] = Time(data, format='datetime64')
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = 'isot'
# Numpy timedelta64
elif data.dtype.kind == 'm':
from astropy.time import TimeDelta
data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format='sec')
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- http://docs.astropy.org/en/stable/table/
- http://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
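Examples
--------
A short sketch of the unit handling (a column with a unit comes back
as a `~astropy.units.Quantity`)::
>>> import astropy.units as u
>>> t = QTable([[1.0, 2.0] * u.m], names=('dist',))
>>> isinstance(t['dist'], u.Quantity)
True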
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
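Examples
--------
A sketch wrapping a structured array so it can be stored in a Table
(assumes ``numpy`` is imported as ``np``)::
>>> arr = np.array([(1, 2.0), (3, 4.0)], dtype=[('x', 'i4'), ('y', 'f8')])
>>> col = NdarrayMixin(arr)
>>> col.shape
(2,)
>>> col.dtype.names
('x', 'y')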
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table`.
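Examples
--------
A sketch reading back the default values::
>>> from astropy.table import conf
>>> conf.auto_colname
'col{0}'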
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
'tables (and not overridden). See <http://getbootstrap.com/css/#tables> '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
['slice'],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'list',
)
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases."
)
conf = Conf()
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
from .groups import TableGroups, ColumnGroups
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning)
from .operations import join, setdiff, hstack, vstack, unique, TableMergeError
from .bst import BST, FastBST, FastRBT
from .sorted_array import SortedArray
from .soco import SCEngine
from .serialize import SerializedColumn, represent_mixins_as_columns
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
from astropy.io.ascii import connect
from astropy.io.fits import connect
from astropy.io.misc import connect
from astropy.io.votable import connect
from astropy.io.misc.asdf import connect
from astropy.io.misc.pandas import connect
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import weakref
import re
from copy import deepcopy
import numpy as np
from numpy import ma
# Remove this when Numpy no longer emits this warning and that Numpy version
# becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
# For Numpy versions that do not raise this warning.
MaskedArrayFutureWarning = None
from astropy.units import Unit, Quantity
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData
from astropy.utils.data_info import BaseColumnInfo, dtype_info_name
from astropy.utils.misc import dtype_bytes_or_chars
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
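Examples
--------
A minimal sketch; the copy is independent of the original::
>>> from astropy.table import Column
>>> c = Column([1, 2], name='a')
>>> c2 = col_copy(c)
>>> c2[0] = 99
>>> int(c[0])  # original is unchanged
1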
"""
if isinstance(col, BaseColumn):
return col.copy()
# The new column should have None for the parent_table ref. If the
# original parent_table weakref there at the point of copying then it
# generates an infinite recursion. Instead temporarily remove the weakref
# on the original column and restore after the copy in an exception-safe
# manner.
parent_table = col.info.parent_table
indices = col.info.indices
col.info.parent_table = None
col.info.indices = []
try:
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
newcol.info = col.info
newcol.info.indices = deepcopy(indices or []) if copy_indices else []
for index in newcol.info.indices:
index.replace_col(col, newcol)
finally:
col.info.parent_table = parent_table
col.info.indices = indices
return newcol
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
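Examples
--------
A sketch of the intended failure mode::
>>> mask = FalseArray((2,))
>>> bool(mask.any())
False
>>> mask[0] = True
Traceback (most recent call last):
    ...
ValueError: Cannot set any element of FalseArray class to True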
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {0} class to True'
.format(self.__class__.__name__))
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'unit', 'format', 'description'))
return self._parent_cls(length=length, **attrs)
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
dtype = (np.dtype(dtype).str, shape)
self_data = np.zeros(length, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
else:
if np.dtype(dtype).char == 'S':
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = fix_column_name(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, 'indices', [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
        # such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, '_parent_table', None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
            as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
        6-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', '_unit', '_format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
        functionality but then add in a 6-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
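    # Hedged pickling round-trip sketch for the __reduce__/__setstate__ pair
    # above (uses only stdlib pickle; the attribute values are illustrative):
    #
    #     >>> import pickle
    #     >>> c = Column([1, 2], name='a', unit='m')
    #     >>> c2 = pickle.loads(pickle.dumps(c))
    #     >>> c2.name, str(c2.unit)
    #     ('a', 'm')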
def __array_finalize__(self, obj):
        # ``obj`` will be None for a direct call to the Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
           returns a zero rank scalar array while np.mean() returns a scalar;
           so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape or
(isinstance(out_arr, BaseColumn) and
(context is not None and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
val = fix_column_name(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, '_format', None)
self._format = format_string # set new format string
try:
            # test whether the format string works on a sample value
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{0}': could not display "
"values in this column using this format ({1})".format(
self.name, err.args[0]))
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : boolean
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
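    # Illustrative note: attrs_equal compares only the attributes above, not
    # the data, so columns with identical metadata but different values
    # compare equal:
    #
    #     >>> Column([1], name='a').attrs_equal(Column([2], name='a'))
    #     True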
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
            Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of equivalence pairs, optional
            A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
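    # Hedged usage sketch for convert_unit_to (the values are illustrative):
    #
    #     >>> c = Column([1., 2.], name='d', unit='m')
    #     >>> c.convert_unit_to('cm')   # converts the data in place
    #     >>> str(c.unit), list(c.data)
    #     ('cm', [100.0, 200.0])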
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, self.unit, copy=False, dtype=self.dtype, order='A', subok=True)
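    # Hedged sketch of the view semantics above: because ``copy=False`` is
    # passed to Quantity, in-place changes to the quantity propagate back to
    # the column (assumes ``import astropy.units as u``):
    #
    #     >>> c = Column([1., 2.], name='d', unit='m')
    #     >>> q = c.quantity
    #     >>> q[0] = 3 * u.m   # c[0] is now 3.0 as well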
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : `~astropy.units.Unit` or str
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of equivalence pairs, optional
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', '_format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, 'meta', None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == 'U':
arr = np.char.encode(arr, encoding='utf-8')
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
    If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super().__new__(
cls, data=data, name=name, dtype=dtype, shape=shape, length=length,
description=description, unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super().__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append('{0}={1!r}'.format(attr, val))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
def __bytes__(self):
return str(self).encode('utf-8')
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
        # Find the maximum string length that the dtype of this column can
        # hold (e.g. 15 for a '|U15' dtype).
        self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self_str_len),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
if self.dtype.char == 'S':
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
def _make_compare(oper):
"""
Make comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
"""
swapped_oper = {'__eq__': '__eq__',
'__ne__': '__ne__',
'__gt__': '__lt__',
'__lt__': '__gt__',
'__ge__': '__le__',
'__le__': '__ge__'}[oper]
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# Special case to work around #6838. Other combinations work OK,
# see tests.test_column.test_unicode_sandwich_compare(). In this
# case just swap self and other.
#
# This is related to an issue in numpy that was addressed in np 1.13.
# However that fix does not make this problem go away, but maybe
# future numpy versions will do so. NUMPY_LT_1_13 to get the
# attention of future maintainers to check (by deleting or versioning
# the if block below). See #6899 discussion.
if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U' and
isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):
self, other = other, self
op = swapped_oper
if self.dtype.char == 'S':
other = self._encode_str(other)
return getattr(self.data, op)(other)
return _compare
__eq__ = _make_compare('__eq__')
__ne__ = _make_compare('__ne__')
__gt__ = _make_compare('__gt__')
__lt__ = _make_compare('__lt__')
__ge__ = _make_compare('__ge__')
__le__ = _make_compare('__le__')
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
            from that of the column, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
            # Explicitly convert to the dtype of this column. Needed because
            # numpy 1.7 enforces safe casting by default, which is not the
            # case for numpy 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
data = np.insert(self, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
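    # Hedged usage sketch for insert (a new column is returned; the original
    # is left unchanged):
    #
    #     >>> c = Column([1, 2, 4], name='a')
    #     >>> c2 = c.insert(2, 3)
    #     >>> list(c2), list(c)
    #     ([1, 2, 3, 4], [1, 2, 4])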
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
None: 'null_value'}
def _represent_as_dict(self):
out = super()._represent_as_dict()
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
# Note that adding to _represent_as_dict_attrs triggers later code which
# will add this to the '__serialized_columns__' meta YAML dict.
# Note also one driver here is a performance issue in #8443 where repr() of a
# np.ma.MaskedArray value is up to 10 times slower than repr of a normal array
# value. So regardless of whether there are masked elements it is useful to
# explicitly define this as a serialized column and use col.data.data (ndarray)
# instead of letting it fall through to the "standard" serialization machinery.
out['data'] = col.data.data
self._represent_as_dict_attrs += ('data',)
if np.any(col.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = col.mask
self._represent_as_dict_attrs += ('mask',)
        elif method == 'null_value':
pass
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
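    # Hedged usage sketch: the per-format defaults set in __init__ above can
    # be overridden on a bound column, e.g. to force an explicit mask column
    # when writing ECSV:
    #
    #     >>> col = MaskedColumn([1, 2], mask=[False, True], name='a')
    #     >>> col.info.serialize_method['ecsv'] = 'data_mask'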
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str or None
Value used when filling masked column elements
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
    If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
info = MaskedColumnInfo()
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None:
# Issue #7399 with fix #7422. Passing mask=None to ma.MaskedArray
# is extremely slow (~3 seconds for 1e7 elements), while mask=False
# gets quickly broadcast to the expected bool array of False.
mask = getattr(data, 'mask', False)
if mask is not False:
mask = np.array(mask, copy=copy)
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None and getattr(data, 'fill_value', None) is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = self.dtype.type(data.fill_value)
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
out = self.view(ma.MaskedArray)
# The following is necessary because of a bug in Numpy, which was
# fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
out.fill_value = self.fill_value
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
        fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
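    # Hedged usage sketch for filled (a plain Column is returned when there
    # is no parent table):
    #
    #     >>> mc = MaskedColumn([1, 2, 3], mask=[False, True, False],
    #     ...                   fill_value=99, name='a')
    #     >>> list(mc.filled())
    #     [1, 99, 3]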
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
            from that of the column, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
mask : boolean array_like
Mask value(s) to insert. If not supplied then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
            # Explicitly convert to the dtype of this column. Needed because
            # numpy 1.7 enforces safe casting by default, which is not the
            # case for numpy 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(values.shape, dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == 'S':
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(''))
# update indices
self.info.adjust_indices(index, value, len(self))
# Remove this when Numpy no longer emits this warning and that
# Numpy version becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
if MaskedArrayFutureWarning is None:
ma.MaskedArray.__setitem__(self, index, value)
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore', MaskedArrayFutureWarning)
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
0ddadda3a192107725e94da6fc0341042d085f43a807a330f45a3b16e3d78d87 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import re
import numpy as np
from astropy import log
from astropy.utils.console import Getch, color_print, terminal_size, conf
from astropy.utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode('utf-8', errors='replace')
else:
return str(val)
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
String format functions and most user functions will not be able to deal
with masked values, so we wrap them to ensure they are passed to str().
"""
return lambda format_, val: (str(val) if val is np.ma.masked
else format_func(format_, val))
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
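# Illustrative behavior of the three candidates above for val=1.5 (a sketch;
# only the matching style succeeds, the others raise and get skipped by the
# caller):
#
#     >>> format(1.5, '7.3f')      # plain format specifier
#     '  1.500'
#     >>> '{:7.3f}'.format(1.5)    # new-style format string
#     '  1.500'
#     >>> '%7.3f' % 1.5            # old-style format string
#     '  1.500'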
def get_auto_format_func(
col=None,
possible_string_format_functions=_possible_string_format_functions):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
    col : object, optional
        Column object whose ``info._format_funcs`` dict is used to cache the
        chosen format function. Default is None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val)
try:
out = format_func(format_, val)
if not isinstance(out, str):
                    raise ValueError('Format function for value {0} returned {1} '
                                     'instead of string type'
                                     .format(val, type(out)))
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError('Format function for value {0} failed: {1}'
.format(val, err))
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError('the format passed in did nothing.')
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError('unable to parse format string {0} for its '
'column.'.format(format_))
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
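# Hedged usage sketch (assumes a bound astropy table Column): the first call
# with a given format string tries the candidate functions and caches the
# winner in col.info._format_funcs, so later values take the fast path:
#
#     >>> c = Column([1.2345, 2.3456], name='x')
#     >>> f = get_auto_format_func(c)
#     >>> f('{:.2f}', c[0])
#     '1.23'
#     >>> '{:.2f}' in c.info._format_funcs
#     True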
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
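    # Illustrative values (a sketch): negative limits mean "unlimited"
    # (clipped to sys.maxsize) and small positive limits are clipped to
    # sane minimums:
    #
    #     >>> TableFormatter._get_pprint_size(max_lines=3, max_width=5)
    #     (8, 10)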
def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None,
show_dtype=False, show_length=None, html=False, align=None):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs)
col_strs = list(col_strs_iter)
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs['n_header']
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
val = '<{0}>{1}</{2}>'.format(td, xml_escape(col_str.strip()), td)
row = ('<tr>' + val + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, '<table>')
col_strs.append('</table>')
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs['i_centers']:
col_strs[i] = col_strs[i].center(col_width)
if outs['i_dashes'] is not None:
col_strs[outs['i_dashes']] = '-' * col_width
# Format columns according to alignment. `align` arg has precedent, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError("column align must be one of '<', '^', '>', or '='")
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group('fill')
align_char = match.group('align')
if align_char == '=':
if fill_char != '0':
raise ValueError("fill character must be '0' for '=' align")
fill_char = '' # str.zfill gets used which does not take fill char arg
else:
fill_char = ''
align_char = '>'
justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs['show_length']:
col_strs.append('Length = {0} rows'.format(len(col)))
return col_strs, outs
def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,
show_dtype=False, show_length=None):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
multidims = getattr(col, 'shape', [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
trivial_multidims = np.prod(multidims) == 1
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
if multidims:
col_name += ' [{0}]'.format(
','.join(str(n) for n in multidims))
n_header += 1
yield col_name
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or '')
if show_dtype:
i_centers.append(n_header)
n_header += 1
try:
dtype = dtype_info_name(col.dtype)
except AttributeError:
dtype = 'object'
yield str(dtype)
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield '---'
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, 'default_format',
None)
pssf = (getattr(col.info, 'possible_string_format_functions', None) or
_possible_string_format_functions)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate([np.arange(0, i0 + 1),
np.arange(i1 + 1, len(col))])
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if trivial_multidims:
return format_func(col_format, col[(idx,) + multidim0])
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return '{0} .. {1}'.format(left, right)
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield '...'
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{0}" for entry "{1}" '
'in column "{2}"'.format(col_format, col[idx],
col.info.name))
outs['show_length'] = show_length
outs['n_header'] = n_header
outs['i_centers'] = i_centers
outs['i_dashes'] = i_dashes
def _pformat_table(self, table, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False,
html=False, tableid=None, tableclass=None, align=None):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
none
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
cols = []
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError('got {0} alignment values instead of '
'the number of columns ({1})'
.format(len(align), n_cols))
else:
raise TypeError('align keyword must be str or list or tuple (got {0})'
.format(type(align)))
for align_, col in zip(align, table.columns.values()):
lines, outs = self._pformat_col(col, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
align=align_)
if outs['show_length']:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ['<No columns>'], {'show_length': False}
# Use the values for the last column since they are all the same
n_header = outs['n_header']
n_rows = len(cols[0])
outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ['...'] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = 'table{id}'.format(id=id(table))
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = ' '.join(tableclass)
rows.append('<table id="{tid}" class="{tcls}">'.format(
tid=tableid, tcls=tableclass))
else:
rows.append('<table id="{tid}">'.format(tid=tableid))
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
vals = ('<{0}>{1}</{2}>'.format(td, xml_escape(col[i].strip()), td)
for col in cols)
row = ('<tr>' + ''.join(vals) + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
rows.append(row)
rows.append('</table>')
else:
for i in range(n_rows):
row = ' '.join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(self, tabcol, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = 'f br<>qhpn'
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
if hasattr(tabcol, 'columns'): # tabcol is a table
kwargs['max_width'] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system('cls' if os.name == 'nt' else 'clear')
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = ('red' if i < n_header else 'default'
for i in range(len(lines)))
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ')
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error('Console does not support getting a character'
' as required by more(). Use pprint() instead.')
return
if key in allowed_keys:
break
print(key)
if key.lower() == 'q':
break
elif key == ' ' or key == 'f':
i0 += delta_lines
elif key == 'b':
i0 = i0 - delta_lines
elif key == 'r':
pass
elif key == '<':
i0 = 0
elif key == '>':
i0 = len(tabcol)
elif key == 'p':
i0 -= 1
elif key == 'n':
i0 += 1
elif key == 'h':
showlines = False
print("""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""", end=' ')
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
|
9547b7486661c30c931309628fd13e1b84bf86eb9204371a9ad9678f76bf7e6b | """
High-level table operations:
- join()
- setdiff()
- hstack()
- vstack()
- unique()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import collections
import itertools
from collections import OrderedDict, Counter
from collections.abc import Mapping, Sequence
import numpy as np
from astropy.utils import metadata
from .table import Table, QTable, Row, Column
from astropy.units import Quantity
from . import _np_utils
from .np_utils import fix_column_name, TableMergeError
__all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique']
def _merge_table_meta(out, tables, metadata_conflicts='warn'):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that tables is a Table or sequence of Tables. Returns the
corresponding list of Tables.
"""
# Make sure we have a list of things
if not isinstance(tables, Sequence):
tables = [tables]
# Make sure there is something to stack
if len(tables) == 0:
raise ValueError('no values provided to stack.')
# Convert inputs (Table, Row, or anything column-like) to Tables.
# Special case that Quantity converts to a QTable.
for ii, val in enumerate(tables):
if isinstance(val, Table):
pass
elif isinstance(val, Row):
tables[ii] = Table(val)
elif isinstance(val, Quantity):
tables[ii] = QTable([val])
else:
try:
tables[ii] = Table([val])
except (ValueError, TypeError):
raise TypeError('cannot convert {} to table column.'
.format(val))
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes.
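For example (a sketch)::
>>> _get_out_class([Table(), QTable()]).__name__  # doctest: +SKIP
'QTable'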
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(not issubclass(out_class, obj.__class__) for obj in objs):
raise ValueError('unmergeable object classes {}'
.format([obj.__class__.__name__ for obj in objs]))
return out_class
def join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'], metadata_conflicts='warn'):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : Table object or a value that will initialize a Table object
Left side table in the join
right : Table object or a value that will initialize a Table object
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner'
uniq_col_name : str or None
Format string used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
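Examples
--------
A minimal sketch of an inner join on a shared key column (the printed
layout is indicative)::
>>> from astropy.table import Table, join
>>> t1 = Table({'name': ['a', 'b'], 'x': [1, 2]})
>>> t2 = Table({'name': ['b', 'c'], 'y': [3, 4]})
>>> print(join(t1, t2, keys='name'))  # doctest: +SKIP
name  x   y
---- --- ---
   b   2   3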
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(left, right, keys, join_type,
uniq_col_name, table_names, col_name_map, metadata_conflicts)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
difference is empty, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
# Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1, 'table1'), (table2, 'table2')):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError("The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str))
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
t1['__index1__'] = np.arange(len(table1))  # Keep track of row indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8)
t12 = _join(t1, t2, join_type='left', keys=keys,
metadata_conflicts='silent')
# If t12 is masked then that means some rows were in table1 but not table2.
if t12.masked:
# Define bool mask of table1 rows not in table2
diff = t12['__index2__'].mask
# Get the row indices of table1 for those rows
idx = t12['__index1__'][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def vstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack tables vertically (along rows)
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : Table or list of Table objects
Table(s) to stack along rows (vertically) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(tables, join_type='outer',
uniq_col_name='{col_name}_{table_name}', table_names=None,
metadata_conflicts='warn'):
"""
Stack tables along columns (horizontally)
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : List of Table objects
Tables to stack along columns (horizontally) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
Format string used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names,
col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep='first'):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : `~astropy.table.Table` object or a value that
will initialize a `~astropy.table.Table` object
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : one of 'first', 'last' or 'none'
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : bool
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ('first', 'last', 'none'):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
if input_table.masked:
for key in keys[:]:
if np.any(input_table[key].mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{0}' from keys and rerun "
"unique()".format(key))
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError("no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns")
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
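# ``indices`` holds the starting row of each group plus a final sentinel
# equal to the table length, so group i spans indices[i]:indices[i+1].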
if keep == 'first':
indices = indices[:-1]
elif keep == 'last':
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input
names will be present, while for the other non-key columns the value will be
(col_name_0, None, ..) or (None, col_name_1, ..) etc.
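For example, merging two tables that both have columns ``a`` and ``b`` with
``common_names=['a']`` gives, as a sketch (assuming the default
``uniq_col_name`` format)::
{'a': ['a', 'a'], 'b_1': ['b', None], 'b_2': [None, 'b']}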
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {0}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map to a regular OrderedDict in output column order
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{0}' columns have incompatible types: {1}"
.format(names[0], tme._incompat_types))
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError('Key columns {0!r} have different shape'.format(names))
shape = uniq_shapes.pop()
out_descrs.append((fix_column_name(out_name), dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError('Columns have incompatible types {0}'
.format(err._incompat_types))
tme._incompat_types = err._incompat_types
raise tme
def _join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'],
col_name_map=None, metadata_conflicts='warn'):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner'
uniq_col_name : str or None
Format string used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
if join_type not in ('inner', 'outer', 'left', 'right'):
raise ValueError("The 'join_type' argument should be in 'inner', "
"'outer', 'left' or 'right' (got '{0}' instead)".
format(join_type))
# If we have a single key, put it in a tuple
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError('No keys in common between left and right tables')
elif isinstance(keys, str):
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, 'Left'), (right, 'Right')):
for name in keys:
if name not in arr.colnames:
raise TableMergeError('{0} table does not have key column {1!r}'
.format(arr_label, name))
if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
raise TableMergeError('{0} key column {1!r} has missing values'
.format(arr_label, name))
if not isinstance(arr[name], np.ndarray):
raise ValueError("non-ndarray column '{}' not allowed as a key column"
.format(name))
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError('input tables for join must both have at least one row')
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
# Make an array with just the key columns. This uses a temporary
# structured array for efficiency.
out_keys_dtype = [descr for descr in out_descrs if descr[0] in keys]
out_keys = np.empty(len_left + len_right, dtype=out_keys_dtype)
for key in keys:
out_keys[key][:len_left] = left[key]
out_keys[key][len_left:] = right[key]
idx_sort = out_keys.argsort(order=keys)
out_keys = out_keys[idx_sort]
# Locate the boundaries between groups of equal key values in the sorted keys
diffs = np.concatenate(([True], out_keys[1:] != out_keys[:-1], [True]))
idxs = np.flatnonzero(diffs)
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3}[join_type]
masked, n_out, left_out, left_mask, right_out, right_mask = \
_np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
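# n_out is the number of output rows; left_out and right_out map each output
# row to a row index in the left/right input table, while left_mask and
# right_mask flag output rows that have no counterpart in that input table.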
# If either of the inputs are masked then the output is masked
if left.masked or right.masked:
masked = True
masked = bool(masked)
out = _get_out_class([left, right])(masked=masked)
for out_name, dtype, shape in out_descrs:
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('join unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
if issubclass(col_cls, Column):
out[out_name][:] = np.where(right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out))
else:
# np.where does not work for mixin columns (e.g. Quantity) so
# use a slower workaround.
left_mask = ~right_mask
if np.any(left_mask):
out[out_name][left_mask] = left[left_name].take(left_out)
if np.any(right_mask):
out[out_name][right_mask] = right[right_name].take(right_out)
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = right_name, right, right_out, right_mask
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Finally add the joined column to the output table.
out[out_name] = array[name][array_out]
# If the output table is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# array_mask is 1-d corresponding to length of output column. We need
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (out[out_name].shape[0],) + (1,) * (len(out[out_name].shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, out[out_name].shape)
if array.masked:
array_mask = array_mask | array[name].mask[array_out]
try:
out[out_name][array_mask] = out[out_name].info.mask_val
except Exception: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, out[out_name].__class__.__name__))
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
"""
Stack Tables vertically (by rows)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' (default) means the output will
have the union of all columns, with array values being masked where
no common values are available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Input validation
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# If require_match is True then the output must have exactly the same
# number of columns as each input array
if join_type == 'exact':
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError('Inconsistent columns in input arrays '
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)")
join_type = 'outer'
# For an inner join, keep only columns where all input arrays have that column
if join_type == 'inner':
col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items()
if all(x is not None for x in in_names))
if len(col_name_map) == 0:
raise TableMergeError('Input arrays have no columns in common')
# If there are any output columns where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
masked = any(getattr(arr, 'masked', False) for arr in arrays)
for names in col_name_map.values():
if any(x is None for x in names):
masked = True
break
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)(masked=masked)
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('vstack unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
try:
out[out_name] = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{0}' columns have incompatible types: {1}"
.format(out_name, err._incompat_types))
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
out[out_name][idx0:idx1] = array[name]
else:
try:
out[out_name][idx0:idx1] = out[out_name].info.mask_val
except Exception:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, out[out_name].__class__.__name__))
idx0 = idx1
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}',
table_names=None, col_name_map=None):
"""
Stack tables horizontally (by columns)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer'
(default) means the output will have the union of all rows, with
array values being masked where no common values are available.
Parameters
----------
arrays : List of tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
Format string used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Input validation
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("join_type arg must be either 'inner', 'exact' or 'outer'")
if table_names is None:
table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError('Number of arrays must match number of table_names')
# Trivial case of a single input array
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# If require_match is True then all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == 'exact':
if len(set(arr_lens)) > 1:
raise TableMergeError("Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)")
join_type = 'outer'
# For an inner join, keep only the common rows
if join_type == 'inner':
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
masked = any(getattr(arr, 'masked', False) for arr in arrays) or len(set(arr_lens)) > 1
n_rows = max(arr_lens)
out = _get_out_class(arrays)(masked=masked)
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
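# Pad the shorter column by repeating its row 0 for the extra rows;
# those padded rows are masked out immediately below.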
indices = np.arange(n_rows)
indices[arr_len:] = 0
out[out_name] = array[name][indices]
try:
out[out_name][arr_len:] = out[out_name].info.mask_val
except Exception:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, out[out_name].__class__.__name__))
else:
out[out_name] = array[name][:n_rows]
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
|
abc68a54482657d503ccb62fc66a6e26e81ea43cad5485b50edf0531b3ba3696 | """
High-level operations for numpy structured arrays.
Some code and inspiration taken from numpy.lib.recfunctions.join_by().
Redistribution license restrictions apply.
"""
import collections
from collections import OrderedDict, Counter
from collections.abc import Sequence
import numpy as np
__all__ = ['TableMergeError']
class TableMergeError(ValueError):
pass
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input
names will be present, while for the other non-key columns the value will be
(col_name_0, None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.dtype.names:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.dtype.names for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {0}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map to a regular OrderedDict in output column order
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{0}' columns have incompatible types: {1}"
.format(names[0], tme._incompat_types))
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError('Key columns {0!r} have different shape'.format(names))
shape = uniq_shapes.pop()
out_descrs.append((fix_column_name(out_name), dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of structured ndarray columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
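For example (a sketch; the dtype string is platform-dependent)::
>>> common_dtype([np.array([1], dtype=np.int32),
...               np.array([1.5], dtype=np.float64)])  # doctest: +SKIP
'<f8'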
"""
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = set(tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
for col in cols)
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [col.dtype.name for col in cols]
tme = TableMergeError('Columns have incompatible types {0}'
.format(incompat_types))
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=col.dtype) for col in cols]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for arr in arrs:
if arr.dtype.kind in ('S', 'U'):
arr[0] = '0' * arr.itemsize
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
def _check_for_sequence_of_structured_arrays(arrays):
err = '`arrays` arg must be a sequence (e.g. list) of structured arrays'
if not isinstance(arrays, Sequence):
raise TypeError(err)
for array in arrays:
# Must be structured array
if not isinstance(array, np.ndarray) or array.dtype.names is None:
raise TypeError(err)
if len(arrays) == 0:
raise ValueError('`arrays` arg must include at least one array')
def fix_column_name(val):
"""
Coerce a column name to `str` so that it is acceptable as a field name
in a numpy structured array. `None` is passed through unchanged.
"""
if val is not None:
val = str(val)
return val
def recarray_fromrecords(rec_list):
"""
Partial replacement for `~numpy.core.records.fromrecords` which includes
a workaround for the bug with unicode arrays described at:
https://github.com/astropy/astropy/issues/3052
This should not serve as a full replacement for the original function;
this only does enough to fulfill the needs of the table module.
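For example (a sketch; the resulting field names and dtypes depend on the
input records)::
>>> recarray_fromrecords([(1.0, 2), (3.0, 4)])  # doctest: +SKIP
rec.array([(1., 2), (3., 4)], dtype=[('f0', '<f8'), ('f1', '<i8')])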
"""
# Note: This is just copying what Numpy does for converting arbitrary rows
# to column arrays in the recarray module; it could be there is a better
# way
nfields = len(rec_list[0])
obj = np.array(rec_list, dtype=object)
array_list = [np.array(obj[..., i].tolist()) for i in range(nfields)]
formats = []
for obj in array_list:
formats.append(obj.dtype.str)
formats = ','.join(formats)
return np.rec.fromarrays(array_list, formats=formats)
|
bf2bdff72f309f88ed22e3b3f513ffef46494819e05914a9c79ca3c091ecd30d | import textwrap
import copy
from collections import OrderedDict
__all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table']
class ColumnOrderList(list):
"""
List of tuples that sorts in a specific order that makes sense for
astropy table column attributes.
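For example (a sketch)::
>>> c = ColumnOrderList([('unit', 'm'), ('name', 'x')])
>>> c.sort()
>>> c  # doctest: +SKIP
[('name', 'x'), ('unit', 'm')]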
"""
def sort(self, *args, **kwargs):
super().sort()
column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta']
in_dict = dict(self)
out_list = []
for key in column_keys:
if key in in_dict:
out_list.append((key, in_dict[key]))
for key, val in self:
if key not in column_keys:
out_list.append((key, val))
# Clear list in-place
del self[:]
self.extend(out_list)
class ColumnDict(dict):
"""
Specialized dict subclass to represent attributes of a Column
and return items() in a preferred order. This is only for use
in generating a YAML map representation that has a fixed order.
"""
def items(self):
"""
Return items as a ColumnOrderList, which sorts in the preferred
way for column attributes.
"""
return ColumnOrderList(super().items())
def _construct_odict(load, node):
"""
Construct OrderedDict from !!omap in yaml safe load.
Source: https://gist.github.com/weaver/317164
License: Unspecified
This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop
Examples
--------
::
>>> yaml.load(''' # doctest: +SKIP
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
import yaml
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a sequence, but found {}".format(node.id), node.start_mark)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found {}".format(subnode.id),
subnode.start_mark)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found {} items".format(len(subnode.value)),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
def _repr_pairs(dump, tag, sequence, flow_style=None):
"""
This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple.
Source: https://gist.github.com/weaver/317164
License: Unspecified
"""
import yaml
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for (key, val) in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def _repr_odict(dumper, data):
"""
Represent OrderedDict in yaml dump.
Source: https://gist.github.com/weaver/317164
License: Unspecified
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
return _repr_pairs(dumper, u'tag:yaml.org,2002:omap', data.items())
def _repr_column_dict(dumper, data):
"""
Represent ColumnDict in yaml dump.
This is the same as an ordinary mapping except that the keys
are written in a fixed order that makes sense for astropy table
columns.
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data)
def _get_col_attributes(col):
"""
Extract information from a column (apart from the values) that is required
to fully serialize the column.
"""
attrs = ColumnDict()
attrs['name'] = col.info.name
type_name = col.info.dtype.type.__name__
if type_name.startswith(('bytes', 'str')):
type_name = 'string'
if type_name.endswith('_'):
type_name = type_name[:-1] # string_ and bool_ lose the final _ for ECSV
attrs['datatype'] = type_name
# Set the output attributes
for attr, nontrivial, xform in (('unit', lambda x: x is not None, str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
attrs[attr] = xform(col_attr) if xform else col_attr
return attrs
def get_yaml_from_table(table):
"""
Return lines with a YAML representation of header content from the ``table``.
Parameters
----------
table : `~astropy.table.Table` object
Table for which header content is output
Returns
-------
lines : list
List of text lines with YAML header content
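Examples
--------
A minimal sketch (requires PyYAML; the exact lines depend on the astropy
and PyYAML versions)::
>>> from astropy.table import Table
>>> t = Table({'x': [1, 2]})
>>> for line in get_yaml_from_table(t):  # doctest: +SKIP
...     print(line)
datatype:
- {name: x, datatype: int64}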
"""
header = {'cols': list(table.columns.values())}
if table.meta:
header['meta'] = table.meta
return get_yaml_from_header(header)
def get_yaml_from_header(header):
"""
Return lines with a YAML representation of header content from a Table.
The ``header`` dict must contain these keys:
- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)
Other keys included in ``header`` will be serialized in the output YAML
representation.
Parameters
----------
header : dict
Table header content
Returns
-------
lines : list
List of text lines with YAML header content
"""
try:
import yaml
except ImportError:
raise ImportError('`import yaml` failed, PyYAML package is '
'required for serializing mixin columns')
from astropy.io.misc.yaml import AstropyDumper
class TableDumper(AstropyDumper):
"""
Custom Dumper that represents OrderedDict as an !!omap object.
"""
def represent_mapping(self, tag, mapping, flow_style=None):
"""
This is a combination of the Python 2 and 3 versions of this method
in the PyYAML library to allow the required key ordering via the
ColumnOrderList object. The Python 3 version insists on turning the
items() mapping into a list object and sorting, which results in
alphabetical order for the column keys.
"""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
if hasattr(mapping, 'sort'):
mapping.sort()
else:
mapping = list(mapping)
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
TableDumper.add_representer(OrderedDict, _repr_odict)
TableDumper.add_representer(ColumnDict, _repr_column_dict)
header = copy.copy(header) # Don't overwrite original
header['datatype'] = [_get_col_attributes(col) for col in header['cols']]
del header['cols']
lines = yaml.dump(header, default_flow_style=None,
Dumper=TableDumper, width=130).splitlines()
return lines
class YamlParseError(Exception):
pass
def get_header_from_yaml(lines):
"""
Get a header dict from input ``lines`` which should be valid YAML. This
input will typically be created by get_yaml_from_header. The output is a
dictionary which describes all the table and column meta.
The get_cols() method in the io/ascii/ecsv.py file should be used as a
guide to using the information when constructing a table using this
header dict information.
Parameters
----------
lines : list
List of text lines with YAML header content
Returns
-------
header : dict
Dictionary describing table and column meta
"""
try:
import yaml
except ImportError:
raise ImportError('`import yaml` failed, PyYAML package '
'is required for serializing mixin columns')
from astropy.io.misc.yaml import AstropyLoader
class TableLoader(AstropyLoader):
"""
Custom Loader that constructs OrderedDict from an !!omap object.
This does nothing but provide a namespace for adding the
custom odict constructor.
"""
TableLoader.add_constructor(u'tag:yaml.org,2002:omap', _construct_odict)
# Now actually load the YAML data structure into `meta`
header_yaml = textwrap.dedent('\n'.join(lines))
try:
header = yaml.load(header_yaml, Loader=TableLoader)
except Exception as err:
raise YamlParseError(str(err))
return header
|
cdb2ff1d70ebc42d40db182245525f4e887562592e27adcd6dd680d0a33775cb | from importlib import import_module
import re
from copy import deepcopy
from collections import OrderedDict
from astropy.utils.data_info import MixinInfo
from .column import Column
from .table import Table, QTable, has_info_class
from astropy.units.quantity import QuantityInfo
__construct_mixin_classes = ('astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.table.NdarrayMixin',
'astropy.table.column.MaskedColumn')
class SerializedColumn(dict):
"""
Subclass of dict that is used in the representation to contain the name
(and possibly other info) for a mixin attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
Normally contains the single key ``name`` with the name of the column in the
table.
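For example, the ``jd1`` component of a serialized `~astropy.time.Time`
column named ``t`` would be referenced with
``SerializedColumn({'name': 't.jd1'})``.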
"""
pass
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
"""Carry out processing needed to serialize ``col`` in an output table
consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This
relies on the object to determine whether any transformation is required and may
depend on the ``serialize_method`` and ``serialize_context`` context
variables. For instance a ``MaskedColumn`` may be stored directly to
FITS, but can also be serialized as separate data and mask columns.
This function builds up a list of plain columns in the ``new_cols`` arg (which
is passed as a persistent list). This includes both plain columns from the
original table and plain columns that represent data from serialized columns
(e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column).
For serialized columns the ``mixin_cols`` dict is updated with required
attributes and information to subsequently reconstruct the table.
Table mixin columns are always serialized and get represented by one
or more data columns. In earlier versions of the code *only* mixin
columns were serialized, hence the use within this code of "mixin"
to imply serialization. Starting with version 3.1, the non-mixin
``MaskedColumn`` can also be serialized.
"""
obj_attrs = col.info._represent_as_dict()
ordered_keys = col.info._represent_as_dict_attrs
# If serialization is not required (see function docstring above)
# or explicitly specified as excluded, then treat as a normal column.
if not obj_attrs or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial, xform in (('unit', lambda x: x is not None and x != '', str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = xform(col_attr) if xform else col_attr
data_attrs = [key for key in ordered_keys if key in obj_attrs and
getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec), unless it is the primary data
# attribute for the column (e.g. value for Quantity or data
# for MaskedColumn)
if data_attr == col.info._represent_as_dict_primary_data:
new_name = name
else:
new_name = name + '.' + data_attr
if not has_info_class(data, MixinInfo):
new_cols.append(Column(data, name=new_name, **info))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__
mixin_cols[name] = obj_attrs
def represent_mixins_as_columns(tbl, exclude_classes=()):
"""Represent input Table ``tbl`` using only `~astropy.table.Column`
or `~astropy.table.MaskedColumn` objects.
This function converts any mixin columns like `~astropy.time.Time` in
``tbl`` into one or more plain ``~astropy.table.Column`` objects and returns
a new Table. A single mixin column may be split into multiple column
components as needed for fully representing the column. This includes the
possibility of recursive splitting, as shown in the example below. The
new column names are formed as ``<column_name>.<component>``, e.g.
``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``.
In addition to splitting columns, this function updates the table ``meta``
dictionary to include a dict named ``__serialized_columns__`` which provides
additional information needed to construct the original mixin columns from
the split columns.
This function is used by astropy I/O when writing tables to ECSV, FITS,
HDF5 formats.
Note that if the table does not include any mixin columns then the original
table is returned with no update to ``meta``.
Parameters
----------
tbl : `~astropy.table.Table` or subclass
Table to represent mixins as Columns
exclude_classes : tuple of classes
Exclude any mixin columns which are instances of any classes in the tuple
Returns
-------
tbl : `~astropy.table.Table`
New Table with updated columns, or else the original input ``tbl``
Examples
--------
>>> from astropy.table import Table, represent_mixins_as_columns
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord
>>> x = [100.0, 200.0]
>>> obstime = Time([1999.0, 2000.0], format='jyear')
>>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime)
>>> tbl = Table([sc, x], names=['sc', 'x'])
>>> represent_mixins_as_columns(tbl)
<Table length=2>
sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x
deg deg
float64 float64 float64 float64 float64
------- ------- -------------- -------------- -------
1.0 3.0 2451180.0 -0.25 100.0
2.0 4.0 2451545.0 0.0 200.0
"""
# Dict of metadata for serializing each column, keyed by column name.
# Gets filled in place by _represent_mixin_as_column().
mixin_cols = {}
# List of columns for the output table. For plain Column objects
# this will just be the original column object.
new_cols = []
# Go through table columns and represent each column as one or more
# plain Column objects (in new_cols) + metadata (in mixin_cols).
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
# If no metadata was created then just return the original table.
if not mixin_cols:
return tbl
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError('unsupported class for construct {}'.format(cls_full_name))
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
class _TableLite(OrderedDict):
"""
Minimal table-like object for _construct_mixin_from_columns. This allows
manipulating the object like a Table but without the actual overhead
for a full Table.
More importantly, there is an issue with constructing MaskedColumn, where the
encoded Column components (data, mask) are turned into a MaskedColumn.
When this happens in a real table, all other columns are immediately
masked and a warning is issued. This is not desirable.
"""
def add_column(self, col, index=0):
colnames = self.colnames
self[col.info.name] = col
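# The new key was appended at the end of the OrderedDict; cycle every
# pre-existing column at or after ``index`` to the end so that the new
# column ends up at position ``index``.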
for ii, name in enumerate(colnames):
if ii >= index:
self.move_to_end(name)
@property
def colnames(self):
return list(self.keys())
def itercols(self):
return self.values()
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
_construct_mixin_from_columns(name, val, out)
data_attrs_map[name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
col = out[name]
obj_attrs[data_attr] = col
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column.
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
meta = tbl.meta.copy()
mixin_cols = meta.pop('__serialized_columns__')
out = _TableLite(tbl.columns)
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
out_cls = QTable if has_quantities else Table
return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
|
74cc8e9ce831fce0eeb9624ac3dfae6455462aa38f99f25769a55f875bf132fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
from itertools import cycle
import string
import numpy as np
from .table import Table, Column
from astropy.utils.data_info import ParentDtypeInfo
class TimingTables:
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table['i'] = np.arange(size)
self.table['a'] = np.random.random(size) # float
self.table['b'] = np.random.random(size) > 0.5 # bool
self.table['c'] = np.random.random((size, 10)) # 2d column
self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)), size)
self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table['a'] > 0.9)[0]
self.table_grouped = self.table.group_by('d')
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table['i'] = np.arange(1, size, 3)
self.other_table['f'] = np.random.random()
self.other_table.sort('f')
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2['g'] = np.random.random(size)
self.other_table_2['h'] = np.random.random((size, 10))
self.bool_mask = self.table['a'] > 0.6
def simple_table(size=3, cols=None, kinds='ifS', masked=False):
"""
Return a simple table for testing.
Examples
--------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, optional
Number of table columns. Defaults to number of kinds.
    kinds : str
        String consisting of the column dtype.kinds. This string
        will be cycled through to generate the column dtypes.
        The allowed values are 'i', 'f', 'S', 'O'.
    masked : bool, optional
        If `True`, return a masked table where every third element in each
        column is masked (with a per-column offset).
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
raise ValueError("Max 26 columns in SimpleTable")
columns = []
names = [chr(ord('a') + ii) for ii in range(cols)]
letters = np.array([c for c in string.ascii_letters])
for jj, kind in zip(range(cols), cycle(kinds)):
if kind == 'i':
data = np.arange(1, size + 1, dtype=np.int64) + jj
elif kind == 'f':
data = np.arange(size, dtype=np.float64) + jj
elif kind == 'S':
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == 'O':
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError('Unknown data kind')
columns.append(Column(data))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
from astropy.utils.data import get_pkg_data_filename
from astropy.io.votable.table import parse
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'),
pedantic=False)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
class ArrayWrapper:
"""
Minimal mixin using a simple wrapper around a numpy array
"""
info = ParentDtypeInfo()
def __init__(self, data):
self.data = np.array(data)
if 'info' in getattr(data, '__dict__', ()):
self.info = data.info
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
out = self.data[item]
else:
out = self.__class__(self.data[item])
if 'info' in self.__dict__:
out.info = self.info
return out
def __setitem__(self, item, value):
self.data[item] = value
def __len__(self):
return len(self.data)
def __eq__(self, other):
"""Minimal equality testing, mostly for mixin unit tests"""
if isinstance(other, ArrayWrapper):
return self.data == other.data
else:
return self.data == other
@property
def dtype(self):
return self.data.dtype
@property
def shape(self):
return self.data.shape
def __repr__(self):
return ("<{0} name='{1}' data={2}>"
.format(self.__class__.__name__, self.info.name, self.data))
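# A short sketch of ``ArrayWrapper`` as a table mixin column (illustrative
# only; this mirrors its role in the mixin unit tests):
def _example_array_wrapper():
    aw = ArrayWrapper([1, 2, 3])
    t = Table([aw], names=['aw'])
    # Slicing the mixin column goes through __getitem__ and returns a new
    # ArrayWrapper, carrying over any instance-level ``info``.
    return t['aw'][1:]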
|
4220392fcb20ca86cb27316da8665ca71f58066d7876cb6beb271692a58b409b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from .core import Kernel1D, Kernel2D, Kernel
from .utils import has_even_axis, raise_even_kernel_exception
from astropy.modeling import models
from astropy.modeling.core import Fittable1DModel, Fittable2DModel
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel',
'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel',
'Trapezoid1DKernel', 'MexicanHat1DKernel', 'MexicanHat2DKernel',
'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel',
'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel']
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
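# For example, _round_up_to_odd_integer(8 * 1.5) == 13: the value is rounded
# up and bumped to odd so the kernel always has a well-defined center pixel.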
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : odd int, optional
Size of the kernel array. Default = 8 * stddev
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),
0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float
Rotation angle in radians. The rotation angle increases
counterclockwise.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * stddev.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * stddev.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
@deprecated_renamed_argument('stddev', 'x_stddev', '3.0')
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev),
0, 0, x_stddev=x_stddev,
y_stddev=y_stddev, theta=theta)
self._default_size = _round_up_to_odd_integer(
8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
    The Box filter or running mean is a smoothing filter. It is not isotropic
    and can produce artifacts when applied repeatedly to the same data.
    By default the Box kernel uses the ``linear_interp`` discretization mode,
    which allows non-shifting, even-sized kernels. This is achieved by
    weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
    smoothing width of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1. / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
    The Box filter or running mean is a smoothing filter. It is not isotropic
    and can produce artifacts when applied repeatedly to the same data.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self._truncation = 0
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
    TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)),
0, 0, radius_in, width)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self._truncation = 0
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1., **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
Parameters
----------
radius : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1., **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class MexicanHat1DKernel(Kernel1D):
"""
1D Mexican hat filter kernel.
The Mexican Hat, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
    or constant structures (e.g. background). It is useful for peak or
    multi-scale detection.
    This kernel is derived from a normalized Gaussian function by
    computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import MexicanHat1DKernel
mexicanhat_1D_kernel = MexicanHat1DKernel(10)
plt.plot(mexicanhat_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3)
self._model = models.MexicanHat1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class MexicanHat2DKernel(Kernel2D):
"""
2D Mexican hat filter kernel.
The Mexican Hat, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
    or constant structures (e.g. background). It is useful for peak or
    multi-scale detection.
    This kernel is derived from a normalized Gaussian function by
    computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import MexicanHat2DKernel
mexicanhat_2D_kernel = MexicanHat2DKernel(10)
plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width ** 4)
self._model = models.MexicanHat2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture. This
kernel is normalized to a peak value of 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * radius.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * radius.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
    Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
This kernel is a typical model for a seeing limited PSF.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
    x_size : odd int, optional
        Size in x direction of the kernel array. Default = 4 * FWHM of the
        Moffat model.
    y_size : odd int, optional
        Size in y direction of the kernel array. Default = 4 * FWHM of the
        Moffat model.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
# Compute amplitude, from
# https://en.wikipedia.org/wiki/Moffat_distribution
amplitude = (alpha - 1.0) / (np.pi * gamma * gamma)
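        # With this amplitude the analytic integral of the Moffat profile,
        # amplitude * pi * gamma**2 / (alpha - 1), equals one for alpha > 1,
        # so the discretized kernel is already approximately normalized.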
self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha)
self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
    This kernel can now be used like a usual Astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class PSFKernel(Kernel2D):
"""
Initialize filter kernel from astropy PSF instance.
"""
_separable = False
def __init__(self):
raise NotImplementedError('Not yet implemented')
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
KernelSizeError
If array size is even.
See also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
if has_even_axis(self):
raise_even_kernel_exception()
# Check if array is bool
ones = self._array == 1.
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
self._truncation = 0.0
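# A brief sketch of the CustomKernel behaviors defined above (illustrative
# only, not part of the module API):
def _example_custom_kernel():
    kernel = CustomKernel([1., 0., 1.])
    assert kernel.is_bool            # the array contains only 0s and 1s
    kernel.normalize()               # in place; the array now sums to one
    # An even-sized array such as [1., 1.] would be rejected by the setter
    # with a KernelSizeError.
    return kernel.array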
|
b61251292cc29cb25fd6ddfa2fcca78ff03612c91ce20db36734c50c1992ca46 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the convolution and filter functionalities of astropy.
A few conceptual notes:
A filter kernel is mainly characterized by its response function. In the 1D
case we speak of "impulse response function", in the 2D case we call it "point
spread function". This response function is given for every kernel by an
astropy `FittableModel`, which is evaluated on a grid to obtain a filter array,
which can then be applied to binned data.
The model is centered on the array and should have an amplitude such that the
array integrates to one by default.
Currently only symmetric 2D kernels are supported.
"""
import warnings
import copy
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .utils import (discretize_model, add_kernel_arrays_1D,
add_kernel_arrays_2D)
MAX_NORMALIZATION = 100
__all__ = ['Kernel', 'Kernel1D', 'Kernel2D', 'kernel_arithmetics']
class Kernel:
"""
Convolution kernel base class.
Parameters
----------
array : `~numpy.ndarray`
Kernel array.
"""
_separable = False
_is_bool = True
_model = None
def __init__(self, array):
self._array = np.asanyarray(array)
@property
def truncation(self):
"""
Deviation from the normalization to one.
"""
return self._truncation
@property
def is_bool(self):
"""
Indicates if kernel is bool.
        If the kernel is bool, the multiplication in the convolution can be
        omitted to increase performance.
"""
return self._is_bool
@property
def model(self):
"""
Kernel response model.
"""
return self._model
@property
def dimension(self):
"""
Kernel dimension.
"""
return self.array.ndim
@property
def center(self):
"""
Index of the kernel center.
"""
return [axes_size // 2 for axes_size in self._array.shape]
def normalize(self, mode='integral'):
"""
Normalize the filter kernel.
Parameters
----------
mode : {'integral', 'peak'}
One of the following modes:
* 'integral' (default)
Kernel is normalized such that its integral = 1.
* 'peak'
Kernel is normalized such that its peak = 1.
"""
if mode == 'integral':
normalization = self._array.sum()
elif mode == 'peak':
normalization = self._array.max()
else:
raise ValueError("invalid mode, must be 'integral' or 'peak'")
# Warn the user for kernels that sum to zero
if normalization == 0:
warnings.warn('The kernel cannot be normalized because it '
'sums to zero.', AstropyUserWarning)
else:
np.divide(self._array, normalization, self._array)
self._kernel_sum = self._array.sum()
@property
def shape(self):
"""
Shape of the kernel array.
"""
return self._array.shape
@property
def separable(self):
"""
Indicates if the filter kernel is separable.
        A 2D filter is separable when its filter array can be written as the
        outer product of two 1D arrays.
        If a filter kernel is separable, higher-dimensional convolutions are
        performed by applying the 1D filter array consecutively along every
        dimension. This is significantly faster than using a full filter array
        of the same dimension.
"""
return self._separable
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
def __add__(self, kernel):
"""
Add two filter kernels.
"""
return kernel_arithmetics(self, kernel, 'add')
def __sub__(self, kernel):
"""
Subtract two filter kernels.
"""
return kernel_arithmetics(self, kernel, 'sub')
def __mul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __rmul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __array__(self):
"""
Array representation of the kernel.
"""
return self._array
def __array_wrap__(self, array, context=None):
"""
Wrapper for multiplication with numpy arrays.
"""
        if context is not None and isinstance(context[0], np.ufunc):
return NotImplemented
else:
return array
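# A small sketch of the two normalization modes (illustrative only; kernels
# are normally constructed through Kernel1D/Kernel2D rather than Kernel):
def _example_normalize_modes():
    k = Kernel(np.array([1., 3., 1.]))
    k.normalize(mode='integral')   # array becomes [0.2, 0.6, 0.2], sum == 1
    k.normalize(mode='peak')       # peak becomes one: [1/3, 1., 1/3]
    return k.array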
class Kernel1D(Kernel):
"""
Base class for 1D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : odd int, optional
Size of the kernel array. Default = 8 * width.
array : `~numpy.ndarray`
Kernel array.
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, array=None, **kwargs):
# Initialize from model
if array is None:
if self._model is None:
raise TypeError("Must specify either array or model.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, **kwargs)
# Initialize from array
elif array is not None:
self._model = None
super().__init__(array)
class Kernel2D(Kernel):
"""
Base class for 2D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
array : `~numpy.ndarray`
Kernel array.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
width : number
Width of the filter kernel.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs):
# Initialize from model
if array is None:
if self._model is None:
raise TypeError("Must specify either array or model.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
if y_size is None:
y_size = x_size
elif y_size != int(y_size):
raise TypeError("y_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
if y_size % 2 == 0: # even kernel
y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5)
else: # odd kernel
y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, y_range, **kwargs)
# Initialize from array
elif array is not None:
self._model = None
super().__init__(array)
def kernel_arithmetics(kernel, value, operation):
"""
Add, subtract or multiply two kernels.
Parameters
----------
kernel : `astropy.convolution.Kernel`
Kernel instance
value : kernel, float or int
Value to operate with
operation : {'add', 'sub', 'mul'}
One of the following operations:
* 'add'
Add two kernels
* 'sub'
Subtract two kernels
* 'mul'
Multiply kernel with number or convolve two kernels.
"""
# 1D kernels
if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D):
if operation == "add":
new_array = add_kernel_arrays_1D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_1D(kernel.array, -value.array)
if operation == "mul":
raise Exception("Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead.")
new_kernel = Kernel1D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# 2D kernels
elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D):
if operation == "add":
new_array = add_kernel_arrays_2D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_2D(kernel.array, -value.array)
if operation == "mul":
raise Exception("Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead.")
new_kernel = Kernel2D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# kernel and number
elif ((isinstance(kernel, Kernel1D) or isinstance(kernel, Kernel2D))
and np.isscalar(value)):
if operation == "mul":
            new_kernel = copy.copy(kernel)
            # Rebind rather than scale in place so the shallow copy does not
            # mutate the original kernel's array.
            new_kernel._array = kernel._array * value
else:
raise Exception("Kernel operation not supported.")
else:
raise Exception("Kernel operation not supported.")
return new_kernel
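# A minimal sketch of ``kernel_arithmetics`` through the operator overloads
# on ``Kernel`` (illustrative values only):
def _example_kernel_arithmetics():
    k1 = Kernel1D(array=np.array([0., 1., 0.]))
    k2 = Kernel1D(array=np.array([1., 1., 1., 1., 1.]))
    k_sum = k1 + k2    # center-aligned addition -> [1., 1., 2., 1., 1.]
    k_scaled = 2 * k1  # scalar multiplication scales the kernel array
    return k_sum.array, k_scaled.array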
|
73cab06c1c9561ea68b15ca59d2fd1031c21a5e6efdc4f83be935e0e3782a616 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .core import * # noqa
from .kernels import * # noqa
from .utils import discretize_model # noqa
from .convolve import convolve, convolve_fft, interpolate_replace_nans, convolve_models # noqa
|
f0e22277df0303b2ac5d7736b4542004494e4129db8a47912dd6d3cf26907446 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from distutils.extension import Extension
C_CONVOLVE_PKGDIR = os.path.relpath(os.path.dirname(__file__))
SRC_FILES = [os.path.join(C_CONVOLVE_PKGDIR, filename)
for filename in ['src/convolve.c']]
extra_compile_args = ['-UNDEBUG']
if not sys.platform.startswith('win'):
extra_compile_args.append('-fPIC')
def get_extensions():
# Add '-Rpass-missed=.*' to ``extra_compile_args`` when compiling with clang
# to report missed optimizations
_convolve_ext = Extension(name='astropy.convolution._convolve', sources=SRC_FILES,
extra_compile_args=extra_compile_args,
include_dirs=["numpy"],
language='c')
return [_convolve_ext]
|
1bf3b3ec948f88c83b0c7f3a7a12c52cf6a6e06248bc0e45f0dd247827428670 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.modeling.core import FittableModel, custom_model
__all__ = ['discretize_model']
class DiscretizationError(Exception):
"""
    Raised when discretization of models goes wrong.
"""
class KernelSizeError(Exception):
"""
    Raised when a kernel size is even.
"""
def has_even_axis(array):
if isinstance(array, (list, tuple)):
return not len(array) % 2
else:
return any(not axes_size % 2 for axes_size in array.shape)
def raise_even_kernel_exception():
raise KernelSizeError("Kernel size must be odd in all axes.")
def add_kernel_arrays_1D(array_1, array_2):
"""
Add two 1D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = array_1.size // 2
slice_ = slice(center - array_2.size // 2,
center + array_2.size // 2 + 1)
new_array[slice_] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = array_2.size // 2
slice_ = slice(center - array_1.size // 2,
center + array_1.size // 2 + 1)
new_array[slice_] += array_1
return new_array
return array_2 + array_1
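# For example, the smaller array is embedded at the center of the larger one
# (illustrative; the 2D version below behaves analogously along each axis):
#
#     >>> add_kernel_arrays_1D(np.array([0., 1., 0.]),
#     ...                      np.array([1., 1., 1., 1., 1.]))
#     array([1., 1., 2., 1., 1.])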
def add_kernel_arrays_2D(array_1, array_2):
"""
Add two 2D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = [axes_size // 2 for axes_size in array_1.shape]
slice_x = slice(center[1] - array_2.shape[1] // 2,
center[1] + array_2.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_2.shape[0] // 2,
center[0] + array_2.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = [axes_size // 2 for axes_size in array_2.shape]
slice_x = slice(center[1] - array_1.shape[1] // 2,
center[1] + array_1.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_1.shape[0] // 2,
center[0] + array_1.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_1
return new_array
return array_2 + array_1
def discretize_model(model, x_range, y_range=None, mode='center', factor=10):
"""
Function to evaluate analytical model functions on a grid.
So far the function can only deal with pixel coordinates.
Parameters
----------
    model : `~astropy.modeling.FittableModel` or callable
        Analytic model function to be discretized. Callables that are not
        instances of `~astropy.modeling.FittableModel` are passed to
        `~astropy.modeling.custom_model` and then evaluated.
    x_range : tuple
        x range in which the model is evaluated. The difference between the
        upper and lower limit must be a whole number, so that the output
        array size is well defined.
    y_range : tuple, optional
        y range in which the model is evaluated. The difference between the
        upper and lower limit must be a whole number, so that the output
        array size is well defined. Necessary only for 2D models.
mode : str, optional
One of the following modes:
* ``'center'`` (default)
Discretize model by taking the value
at the center of the bin.
* ``'linear_interp'``
Discretize model by linearly interpolating
between the values at the corners of the bin.
For 2D models interpolation is bilinear.
* ``'oversample'``
Discretize model by taking the average
on an oversampled grid.
* ``'integrate'``
Discretize model by integrating the model
over the bin using `scipy.integrate.quad`.
Very slow.
factor : float or int
Factor of oversampling. Default = 10.
Returns
-------
    array : `numpy.ndarray`
Model value array
Notes
-----
    The ``oversample`` mode makes it possible to conserve the integral on a
    subpixel scale. Here is the example of a normalized Gaussian1D:
.. plot::
:include-source:
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.convolution.utils import discretize_model
gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
y_center = discretize_model(gauss_1D, (-2, 3), mode='center')
y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp')
y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample')
plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum()))
plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum()))
plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum()))
plt.xlabel('pixels')
plt.ylabel('value')
plt.legend()
plt.show()
"""
if not callable(model):
raise TypeError('Model must be callable.')
if not isinstance(model, FittableModel):
model = custom_model(model)()
ndim = model.n_inputs
if ndim > 2:
raise ValueError('discretize_model only supports 1-d and 2-d models.')
if not float(np.diff(x_range)).is_integer():
raise ValueError("The difference between the upper an lower limit of"
" 'x_range' must be a whole number.")
if y_range:
if not float(np.diff(y_range)).is_integer():
raise ValueError("The difference between the upper an lower limit of"
" 'y_range' must be a whole number.")
if ndim == 2 and y_range is None:
raise ValueError("y range not specified, but model is 2-d")
if ndim == 1 and y_range is not None:
raise ValueError("y range specified, but model is only 1-d.")
if mode == "center":
if ndim == 1:
return discretize_center_1D(model, x_range)
elif ndim == 2:
return discretize_center_2D(model, x_range, y_range)
elif mode == "linear_interp":
if ndim == 1:
return discretize_linear_1D(model, x_range)
if ndim == 2:
return discretize_bilinear_2D(model, x_range, y_range)
elif mode == "oversample":
if ndim == 1:
return discretize_oversample_1D(model, x_range, factor)
if ndim == 2:
return discretize_oversample_2D(model, x_range, y_range, factor)
elif mode == "integrate":
if ndim == 1:
return discretize_integrate_1D(model, x_range)
if ndim == 2:
return discretize_integrate_2D(model, x_range, y_range)
else:
raise DiscretizationError('Invalid mode.')
def discretize_center_1D(model, x_range):
"""
Discretize model by taking the value at the center of the bin.
"""
x = np.arange(*x_range)
return model(x)
def discretize_center_2D(model, x_range, y_range):
"""
Discretize model by taking the value at the center of the pixel.
"""
x = np.arange(*x_range)
y = np.arange(*y_range)
x, y = np.meshgrid(x, y)
return model(x, y)
def discretize_linear_1D(model, x_range):
"""
Discretize model by performing a linear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values_intermediate_grid = model(x)
return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])
def discretize_bilinear_2D(model, x_range, y_range):
"""
Discretize model by performing a bilinear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
x, y = np.meshgrid(x, y)
values_intermediate_grid = model(x, y)
# Mean in y direction
values = 0.5 * (values_intermediate_grid[1:, :]
+ values_intermediate_grid[:-1, :])
# Mean in x direction
values = 0.5 * (values[:, 1:]
+ values[:, :-1])
return values
def discretize_oversample_1D(model, x_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
values = model(x)
# Reshape and compute mean
values = np.reshape(values, (x.size // factor, factor))
return values.mean(axis=1)[:-1]
def discretize_oversample_2D(model, x_range, y_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
y = np.arange(y_range[0] - 0.5 * (1 - 1 / factor),
y_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
x_grid, y_grid = np.meshgrid(x, y)
values = model(x_grid, y_grid)
# Reshape and compute mean
shape = (y.size // factor, factor, x.size // factor, factor)
values = np.reshape(values, shape)
return values.mean(axis=3).mean(axis=1)[:-1, :-1]
def discretize_integrate_1D(model, x_range):
"""
Discretize model by integrating numerically the model over the bin.
"""
from scipy.integrate import quad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values = np.array([])
# Integrate over all bins
for i in range(x.size - 1):
values = np.append(values, quad(model, x[i], x[i + 1])[0])
return values
def discretize_integrate_2D(model, x_range, y_range):
"""
Discretize model by integrating the model over the pixel.
"""
from scipy.integrate import dblquad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
values = np.empty((y.size - 1, x.size - 1))
# Integrate over all pixels
for i in range(x.size - 1):
for j in range(y.size - 1):
values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1],
lambda x: y[j], lambda x: y[j + 1])[0]
return values
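# A short sketch comparing how well the discretization modes conserve the
# integral of a normalized Gaussian (illustrative only; mirrors the
# docstring plot of ``discretize_model`` above):
def _example_discretize_modes():
    from astropy.modeling.models import Gaussian1D
    gauss = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
    # 'oversample' typically conserves the integral best on subpixel scales
    return {mode: discretize_model(gauss, (-10, 11), mode=mode).sum()
            for mode in ('center', 'linear_interp', 'oversample')}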
|
22bf466d6a80fc847e40b41a641002b67049a7c1e1f34c677dcc42cf7bb01ca7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import os
import sys
import glob
import ctypes
from functools import partial
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.console import human_file_size
from astropy.utils.decorators import deprecated_renamed_argument
from astropy import units as u
from astropy.nddata import support_nddata
from astropy.modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
from astropy.modeling.core import _CompoundModelMeta
from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception
LIBRARY_PATH = os.path.dirname(__file__)
try:
_convolve = load_library("_convolve", LIBRARY_PATH)
except Exception:
raise ImportError("Convolution C extension is missing. Try re-building astropy.")
# The GIL is automatically released by default when calling functions imported
# from libraries loaded by ctypes.cdll.LoadLibrary(<path>)
# Declare prototypes
# Boundary None
_convolveNd_c = _convolve.convolveNd_c
_convolveNd_c.restype = None
_convolveNd_c.argtypes = [ndpointer(ctypes.c_double, flags={"C_CONTIGUOUS", "WRITEABLE"}), # return array
ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # input array
ctypes.c_uint, # N dim
ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), # size array for input and result unless
# embed_result_within_padded_region is False,
# in which case the result array is assumed to be
# input.shape - 2*(kernel.shape//2). Note: integer division.
ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # kernel array
ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), # size array for kernel
ctypes.c_bool, # nan_interpolate
ctypes.c_bool, # embed_result_within_padded_region
ctypes.c_uint] # n_threads
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, mask=None, fill_value=None):
# Alias input
input = input.array if isinstance(input, Kernel) else input
output = input
# Copy input
try:
# Anything that's masked must be turned into NaNs for the interpolation.
# This requires copying. A copy is also needed for nan_treatment == 'fill'
# A copy prevents possible function side-effects of the input array.
if nan_treatment == 'fill' or np.ma.is_masked(input) or mask is not None:
if np.ma.is_masked(input):
# ``np.ma.maskedarray.filled()`` returns a copy, however there is no way to specify the return type
# or order etc.
# In addition ``np.nan`` is a ``float`` and there is no conversion to an ``int`` type.
# Therefore, a pre-fill copy is needed for non ``float`` masked arrays.
# ``subok=True`` is needed to retain ``np.ma.maskedarray.filled()``.
# ``copy=False`` allows the fill to act as the copy if type and order are already correct.
output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
output = output.filled(fill_value)
else:
                # Since we're making a copy, we might as well use `subok=False` to save
                # what is probably a negligible amount of memory.
output = np.array(input, dtype=dtype, copy=True, order=order, subok=False)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
output[mask != 0] = fill_value
else:
            # The call below is synonymous with np.asanyarray(array, dtype=float, order='C')
# The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. If it
# is and `subok=False` (default), then it will copy even if `copy=False`. This uses less memory
# when ndarray subclasses are passed in.
output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
except (TypeError, ValueError) as e:
        raise TypeError('input should be a Numpy array or something '
                        'convertible into a float array') from e
return output
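# A sketch of the copy/fill semantics above (illustrative; exact repr
# depends on the numpy version): a masked input is copied and its masked
# entries are replaced by ``fill_value``, leaving the original untouched.
#
#     >>> marr = np.ma.masked_array([1., 2., 3.], mask=[False, True, False])
#     >>> _copy_input_if_needed(marr, fill_value=np.nan)
#     array([ 1., nan,  3.])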
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True, mask=None,
preserve_nan=False, normalization_zero_tol=1e-8):
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or `numpy.ndarray` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
            Periodic boundary that wraps to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one.
    nan_treatment : {'interpolate', 'fill'}, optional
        'interpolate' will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in the image when computing
        both the image and kernel sums.
        'fill' will replace the NaN pixels with a fixed numerical value
        (default zero, see ``fill_value``) prior to convolution.
        Note that if the kernel has a sum equal to zero, NaN interpolation
        is not possible and will raise an exception.
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
        If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
    normalization_zero_tol : float, optional
        The absolute tolerance on whether the kernel is different from zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is 1e-8.
Returns
-------
result : `numpy.ndarray`
        An array with the same dimensions as the input array,
        convolved with the kernel. The data type depends on the input
        array type. If array is a floating point type, then the
        return array keeps the same data type, otherwise the type
        is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
'''
if boundary not in BOUNDARY_OPTIONS:
raise ValueError("Invalid boundary option: must be one of {0}"
.format(BOUNDARY_OPTIONS))
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# OpenMP support is disabled at the C src code level, changing this will have
# no effect.
n_threads = 1
# Keep refs to originals
passed_kernel = kernel
passed_array = array
# The C routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
    # If both the image array and the kernel are Kernel instances, constrain
    # the convolution parameters. This must occur before the main alias/copy
    # of ``passed_kernel`` to ``kernel_internal`` as ``fill_value`` is used
    # for filling masked kernels.
    if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
        warnings.warn("Both array and kernel are Kernel instances, hardwiring the following parameters: "
                      "boundary='fill', fill_value=0, normalize_kernel=True, "
                      "nan_treatment='interpolate'",
                      AstropyUserWarning)
        boundary = 'fill'
        fill_value = 0
        normalize_kernel = True
        nan_treatment = 'interpolate'
    # Convert kernel to ndarray if not already
    # Copy or alias array to array_internal
    array_internal = _copy_input_if_needed(passed_array, dtype=float, order='C',
                                           nan_treatment=nan_treatment, mask=mask,
                                           fill_value=np.nan)
    array_dtype = getattr(passed_array, 'dtype', array_internal.dtype)
    # Copy or alias kernel to kernel_internal
    kernel_internal = _copy_input_if_needed(passed_kernel, dtype=float, order='C',
                                            nan_treatment=None, mask=None,
                                            fill_value=fill_value)
    # Make sure kernel has all odd axes
    if has_even_axis(kernel_internal):
        raise_even_kernel_exception()
#-----------------------------------------------------------------------
# From this point onwards refer only to ``array_internal`` and
# ``kernel_internal``.
# Assume both are base np.ndarrays and NOT subclasses e.g. NOT
# ``Kernel`` nor ``np.ma.maskedarray`` classes.
#-----------------------------------------------------------------------
# Check dimensionality
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim > 3:
raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
'arrays at this time')
elif array_internal.ndim != kernel_internal.ndim:
raise Exception('array and kernel have differing number of '
'dimensions.')
array_shape = np.array(array_internal.shape)
kernel_shape = np.array(kernel_internal.shape)
pad_width = kernel_shape//2
# For boundary=None only the center space is convolved. All array indices within a
# distance kernel.shape//2 from the edge are completely ignored (zeroed).
# E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2
# are convolved. It is therefore not possible to use this method to convolve an
# array by a kernel that is larger (see note below) than the array - as ALL pixels would be ignored
# leaving an array of only zeros.
# Note: For even kernels the correctness condition is array_shape > kernel_shape.
# For odd kernels it is:
# array_shape >= kernel_shape OR array_shape > kernel_shape-1 OR array_shape > 2*(kernel_shape//2).
# The last form is equivalent to the first two for odd lengths, and matches the
# even-kernel condition for even lengths, so the single test below covers both cases.
if boundary is None and not np.all(array_shape > 2*pad_width):
raise KernelSizeError("for boundary=None all kernel axes must be smaller than array's - "
"use boundary in ['fill', 'extend', 'wrap'] instead.")
# NaN interpolation significantly slows down the C convolution
# computation. Since nan_treatment='interpolate' is the default,
# check whether it is even needed; if not, don't interpolate.
# NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any()
nan_interpolate = (nan_treatment == 'interpolate') and np.isnan(array_internal.sum())
# Check if kernel is normalizable
if normalize_kernel or nan_interpolate:
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero:
raise ValueError("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan or nan_treatment == 'fill':
initially_nan = np.isnan(array_internal)
if nan_treatment == 'fill':
array_internal[initially_nan] = fill_value
# Avoid any memory allocation within the C code. Allocate output array
# here and pass through instead.
result = np.zeros(array_internal.shape, dtype=float, order='C')
embed_result_within_padded_region = True
array_to_convolve = array_internal
if boundary in ('fill', 'extend', 'wrap'):
embed_result_within_padded_region = False
if boundary == 'fill':
# This method is faster than using numpy.pad(..., mode='constant')
array_to_convolve = np.full(array_shape + 2*pad_width, fill_value=fill_value, dtype=float, order='C')
# Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of [pad_width[0]:-pad_width[0]]
# to account for when the kernel has size of 1 making pad_width = 0.
if array_internal.ndim == 1:
array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0]] = array_internal
elif array_internal.ndim == 2:
array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0],
pad_width[1]:array_shape[1]+pad_width[1]] = array_internal
elif array_internal.ndim == 3:
array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0],
pad_width[1]:array_shape[1]+pad_width[1],
pad_width[2]:array_shape[2]+pad_width[2]] = array_internal
else:
np_pad_mode_dict = {'fill':'constant', 'extend':'edge', 'wrap':'wrap'}
np_pad_mode = np_pad_mode_dict[boundary]
pad_width = kernel_shape//2
if array_internal.ndim == 1:
np_pad_width = (pad_width[0],)
elif array_internal.ndim == 2:
np_pad_width = ( (pad_width[0],), (pad_width[1],) )
elif array_internal.ndim == 3:
np_pad_width = ( (pad_width[0],), (pad_width[1],), (pad_width[2],) )
array_to_convolve = np.pad(array_internal, pad_width=np_pad_width,
mode=np_pad_mode)
_convolveNd_c(result, array_to_convolve,
array_to_convolve.ndim,
np.array(array_to_convolve.shape, dtype=ctypes.c_size_t, order='C'),
kernel_internal,
np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'),
nan_interpolate, embed_result_within_padded_region,
n_threads
)
# So far, normalization has only occurred for nan_treatment == 'interpolate'
# because this had to happen within the C extension so as to ignore
# any NaNs
if normalize_kernel:
if not nan_interpolate:
result /= kernel_sum
else:
if nan_interpolate:
result *= kernel_sum
if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
warnings.warn("nan_treatment='interpolate', however, NaN values detected "
"post convolution. A contiguous region of NaN values, larger "
"than the kernel size, is present in the input array. "
"Increase the kernel size to avoid this.", AstropyUserWarning)
if preserve_nan:
result[initially_nan] = np.nan
# Convert result to original data type
if isinstance(passed_array, Kernel):
if isinstance(passed_array, Kernel1D):
new_result = Kernel1D(array=result)
elif isinstance(passed_array, Kernel2D):
new_result = Kernel2D(array=result)
new_result._is_bool = False
new_result._separable = passed_array._separable
if isinstance(passed_kernel, Kernel):
new_result._separable = new_result._separable and passed_kernel._separable
return new_result
elif array_dtype.kind == 'f':
# Try to preserve the input type if it's a floating point type
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0')
@support_nddata(data='array')
def convolve_fft(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False, mask=None, crop=True, return_fft=False,
fft_pad=None, psf_pad=None, quiet=False,
min_wt=0.0, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
complex_dtype=complex):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* (optionally) It pads to the nearest 2^n size to improve FFT speed.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.python.org/pypi/pyFFTW>`_ or
`pyFFTW3 <https://pypi.python.org/pypi/PyFFTW3/0.2.1>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
`numpy.fft.fftn` and `numpy.fft.ifftn`.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``.
nan_treatment : 'interpolate', 'fill'
``interpolate`` will result in renormalization of the kernel at each
position, ignoring pixels that are NaN in either the image or the
kernel. ``fill`` will replace the NaN pixels with a fixed numerical
value (default zero, see ``fill_value``) prior to convolution. Note
that if the kernel has a sum equal to zero, NaN interpolation is not
possible and will raise an exception.
normalize_kernel : function or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is 1e-8.
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
Other Parameters
----------------
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
See the examples below
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fftn, ifftn : functions, optional
The fft and inverse fft functions. Can be overridden to use your own
ffts, e.g. an fftw3 wrapper or scipy's fftn,
``fftn=scipy.fftpack.fftn``
complex_dtype : numpy.complex, optional
Which complex dtype to use. `numpy` has a range of options, from
complex64 to complex256.
quiet : bool, optional
Silence warning message about NaN interpolation
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB
Raises
------
ValueError:
If the array is bigger than 1 GB after padding, will raise this exception
unless ``allow_huge`` is True
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data can become
very large and consume a lot of memory. See
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, 0, 3], [0, 1, 0])
array([ 1., 0., 3.])
>>> convolve_fft([1, 2, 3], [1])
array([ 1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
array([ 1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([ 1., 2., 3.])
>>> import scipy.fftpack # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft)
array([ 1., 2., 3.])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError("Can't convolve two kernels with convolve_fft. "
"Use convolve instead.")
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = _copy_input_if_needed(array, dtype=complex, order='C',
nan_treatment=nan_treatment, mask=mask,
fill_value=np.nan)
kernel = _copy_input_if_needed(kernel, dtype=complex, order='C',
nan_treatment=None, mask=None,
fill_value=0)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (np.product(arrayshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_B.to_value(u.byte))))
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1. / MAX_NORMALIZATION:
raise ValueError("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == 'interpolate':
raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel')
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn("The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn("psf_pad was set to {0}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented "
"for fft-based convolution")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
if fft_pad: # default=True
if psf_pad: # default=False
# add the dimensions and then take the max (bigger)
fsize = 2 ** np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
# add the shape lists (max of a list of length 4) (smaller)
# also makes the shapes square
fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape)))
newshape = np.array([fsize for ii in range(array.ndim)], dtype=int)
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape) + np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
# perform a second check after padding
array_size_C = (np.product(newshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_C)))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [slice(center - arraydimsize // 2,
center + (arraydimsize + 1) // 2)]
kernslices += [slice(center - kerndimsize // 2,
center + (kerndimsize + 1) // 2)]
arrayslices = tuple(arrayslices)
kernslices = tuple(kernslices)
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = (nan_treatment == 'interpolate')
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
fftmult *= kernel_scale
if return_fft:
return fftmult
if interpolate_nan:
with np.errstate(divide='ignore'):
# divide by zeros are expected here; if the weight is zero, we want
# the output to be nan or inf
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts.
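Examples
--------
A minimal sketch (the kernel width here is chosen arbitrarily):
>>> import numpy as np
>>> from astropy.convolution import Gaussian1DKernel, interpolate_replace_nans
>>> data = np.array([1., np.nan, 3., 4.])
>>> fixed = interpolate_replace_nans(data, Gaussian1DKernel(stddev=1))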
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(array, kernel, nan_treatment='interpolate',
normalize_kernel=True, preserve_nan=False, **kwargs)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
kwargs : dict
Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : CompoundModel
Convolved model
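Examples
--------
A minimal sketch, assuming two simple 1D Gaussian models:
>>> from astropy.modeling import models
>>> from astropy.convolution import convolve_models
>>> g1 = models.Gaussian1D(amplitude=1, mean=0, stddev=1)
>>> g2 = models.Gaussian1D(amplitude=1, mean=0, stddev=2)
>>> convolved_model = convolve_models(g1, g2, mode='convolve_fft')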
"""
if mode == 'convolve_fft':
BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs))
elif mode == 'convolve':
BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs))
else:
raise ValueError('Mode {} is not supported.'.format(mode))
return _CompoundModelMeta._from_operator(mode, model, kernel)
|
6fbb4d9ca54c91d8601c066970dc682c16daf58eee2949ee6bfed8ac29ba6db3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains classes and functions to standardize access to
configuration files for Astropy and affiliated packages.
.. note::
The configuration system makes use of the 'configobj' package, which stores
configuration in a text format like that used in the standard library
`ConfigParser`. More information and documentation for configobj can be
found at http://www.voidspace.org.uk/python/configobj.html.
"""
from contextlib import contextmanager
import hashlib
import io
from os import path
import re
from warnings import warn
from astropy.extern.configobj import configobj, validate
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from astropy.utils import find_current_module
from astropy.utils.introspection import resolve_name
from astropy.utils.misc import InheritDocstrings
from .paths import get_config_dir
__all__ = ['InvalidConfigurationItemWarning',
'ConfigurationMissingWarning', 'get_config',
'reload_config', 'ConfigNamespace', 'ConfigItem']
class InvalidConfigurationItemWarning(AstropyWarning):
""" A Warning that is issued when the configuration value specified in the
astropy configuration file does not match the type expected for that
configuration value.
"""
class ConfigurationMissingWarning(AstropyWarning):
""" A Warning that is issued when the configuration directory cannot be
accessed (usually due to a permissions problem). If this warning appears,
configuration items will be set to their defaults rather than read from the
configuration file, and no configuration will persist across sessions.
"""
# these are not in __all__ because it's not intended that a user ever see them
class ConfigurationDefaultMissingError(ValueError):
""" An exception that is raised when the configuration defaults (which
should be generated at build-time) are missing.
"""
# this is used in astropy/__init__.py
class ConfigurationDefaultMissingWarning(AstropyWarning):
""" A warning that is issued when the configuration defaults (which
should be generated at build-time) are missing.
"""
class ConfigurationChangedWarning(AstropyWarning):
"""
A warning that the configuration options have changed.
"""
class _ConfigNamespaceMeta(type):
def __init__(cls, name, bases, dict):
if cls.__bases__[0] is object:
return
for key, val in dict.items():
if isinstance(val, ConfigItem):
val.name = key
class ConfigNamespace(metaclass=_ConfigNamespaceMeta):
"""
A namespace of configuration items. Each subpackage with
configuration items should define a subclass of this class,
containing `ConfigItem` instances as members.
For example::
class Conf(_config.ConfigNamespace):
unicode_output = _config.ConfigItem(
False,
'Use Unicode characters when outputting values, ...')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when ...',
aliases=['astropy.utils.console.USE_COLOR'])
conf = Conf()
"""
def set_temp(self, attr, value):
"""
Temporarily set a configuration value.
Parameters
----------
attr : str
Configuration item name
value : object
The value to set temporarily.
Examples
--------
>>> import astropy
>>> with astropy.conf.set_temp('use_color', False):
... pass
... # console output will not contain color
>>> # console output contains color again...
"""
if hasattr(self, attr):
return self.__class__.__dict__[attr].set_temp(value)
raise AttributeError("No configuration parameter '{0}'".format(attr))
def reload(self, attr=None):
"""
Reload a configuration item from the configuration file.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reload all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
return self.__class__.__dict__[attr].reload()
raise AttributeError("No configuration parameter '{0}'".format(attr))
for item in self.__class__.__dict__.values():
if isinstance(item, ConfigItem):
item.reload()
def reset(self, attr=None):
"""
Reset a configuration item to its default.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reset all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
prop = self.__class__.__dict__[attr]
prop.set(prop.defaultvalue)
return
raise AttributeError("No configuration parameter '{0}'".format(attr))
for item in self.__class__.__dict__.values():
if isinstance(item, ConfigItem):
item.set(item.defaultvalue)
class ConfigItem(metaclass=InheritDocstrings):
"""
A setting and associated value stored in a configuration file.
These objects should be created as members of
`ConfigNamespace` subclasses, for example::
class _Conf(config.ConfigNamespace):
unicode_output = config.ConfigItem(
False,
'Use Unicode characters when outputting values, and writing widgets '
'to the console.')
conf = _Conf()
Parameters
----------
defaultvalue : object, optional
The default value for this item. If this is a list of strings, this
item will be interpreted as an 'options' value - this item must be one
of those values, and the first in the list will be taken as the default
value.
description : str or None, optional
A description of this item (will be shown as a comment in the
configuration file)
cfgtype : str or None, optional
A type specifier like those used as the *values* of a particular key
in a ``configspec`` file of ``configobj``. If None, the type will be
inferred from the default value.
module : str or None, optional
The full module name that this item is associated with. The first
element (e.g. 'astropy' if this is 'astropy.config.configuration')
will be used to determine the name of the configuration file, while
the remaining items determine the section. If None, the package will be
inferred from the package within which this object's initializer is
called.
aliases : str, or list of str, optional
The deprecated location(s) of this configuration item. If the
config item is not found at the new location, it will be
searched for at all of the old locations.
Raises
------
RuntimeError
If ``module`` is `None`, but the module this item is created from
cannot be determined.
"""
# this is used to make validation faster so a Validator object doesn't
# have to be created every time
_validator = validate.Validator()
cfgtype = None
"""
A type specifier like those used as the *values* of a particular key in a
``configspec`` file of ``configobj``.
"""
def __init__(self, defaultvalue='', description=None, cfgtype=None,
module=None, aliases=None):
from astropy.utils import isiterable
if module is None:
module = find_current_module(2)
if module is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
module = module.__name__
self.module = module
self.description = description
self.__doc__ = description
# now determine cfgtype if it is not given
if cfgtype is None:
if (isiterable(defaultvalue) and not
isinstance(defaultvalue, str)):
# it is an options list
dvstr = [str(v) for v in defaultvalue]
cfgtype = 'option(' + ', '.join(dvstr) + ')'
defaultvalue = dvstr[0]
elif isinstance(defaultvalue, bool):
cfgtype = 'boolean'
elif isinstance(defaultvalue, int):
cfgtype = 'integer'
elif isinstance(defaultvalue, float):
cfgtype = 'float'
elif isinstance(defaultvalue, str):
cfgtype = 'string'
defaultvalue = str(defaultvalue)
self.cfgtype = cfgtype
self._validate_val(defaultvalue)
self.defaultvalue = defaultvalue
if aliases is None:
self.aliases = []
elif isinstance(aliases, str):
self.aliases = [aliases]
else:
self.aliases = aliases
def __set__(self, obj, value):
return self.set(value)
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self()
def set(self, value):
"""
Sets the current value of this ``ConfigItem``.
This also updates the comments that give the description and type
information.
Parameters
----------
value
The value this item should be set to.
Raises
------
TypeError
If the provided ``value`` is not valid for this ``ConfigItem``.
"""
try:
value = self._validate_val(value)
except validate.ValidateError as e:
msg = 'Provided value for configuration item {0} not valid: {1}'
raise TypeError(msg.format(self.name, e.args[0]))
sec = get_config(self.module)
sec[self.name] = value
@contextmanager
def set_temp(self, value):
"""
Sets this item to a specified value only inside a with block.
Use as::
ITEM = ConfigItem('ITEM', 'default', 'description')
with ITEM.set_temp('newval'):
#... do something that wants ITEM's value to be 'newval' ...
print(ITEM)
# ITEM is now 'default' after the with block
Parameters
----------
value
The value to set this item to inside the with block.
"""
initval = self()
self.set(value)
try:
yield
finally:
self.set(initval)
def reload(self):
""" Reloads the value of this ``ConfigItem`` from the relevant
configuration file.
Returns
-------
val
The new value loaded from the configuration file.
"""
self.set(self.defaultvalue)
baseobj = get_config(self.module, True)
secname = baseobj.name
cobj = baseobj
# a root ConfigObj's parent is itself, so walk up until we find the
# object that is its own parent
while cobj.parent is not cobj:
cobj = cobj.parent
newobj = configobj.ConfigObj(cobj.filename, interpolation=False)
if secname is not None:
if secname not in newobj:
return baseobj.get(self.name)
newobj = newobj[secname]
if self.name in newobj:
baseobj[self.name] = newobj[self.name]
return baseobj.get(self.name)
def __repr__(self):
out = '<{0}: name={1!r} value={2!r} at 0x{3:x}>'.format(
self.__class__.__name__, self.name, self(), id(self))
return out
def __str__(self):
out = '\n'.join(('{0}: {1}',
' cfgtype={2!r}',
' defaultvalue={3!r}',
' description={4!r}',
' module={5}',
' value={6!r}'))
out = out.format(self.__class__.__name__, self.name, self.cfgtype,
self.defaultvalue, self.description, self.module,
self())
return out
def __call__(self):
""" Returns the value of this ``ConfigItem``
Returns
-------
val
This item's value, with a type determined by the ``cfgtype``
attribute.
Raises
------
TypeError
If the configuration value as stored is not this item's type.
"""
def section_name(section):
if section == '':
return 'at the top-level'
else:
return 'in section [{0}]'.format(section)
options = []
sec = get_config(self.module)
if self.name in sec:
options.append((sec[self.name], self.module, self.name))
for alias in self.aliases:
module, name = alias.rsplit('.', 1)
sec = get_config(module)
if '.' in module:
filename, module = module.split('.', 1)
else:
filename = module
module = ''
if name in sec:
if '.' in self.module:
new_module = self.module.split('.', 1)[1]
else:
new_module = ''
warn(
"Config parameter '{0}' {1} of the file '{2}' "
"is deprecated. Use '{3}' {4} instead.".format(
name, section_name(module), get_config_filename(filename),
self.name, section_name(new_module)),
AstropyDeprecationWarning)
options.append((sec[name], module, name))
if len(options) == 0:
self.set(self.defaultvalue)
options.append((self.defaultvalue, None, None))
if len(options) > 1:
filename, sec = self.module.split('.', 1)
warn(
"Config parameter '{0}' {1} of the file '{2}' is "
"given by more than one alias ({3}). Using the first.".format(
self.name, section_name(sec), get_config_filename(filename),
', '.join([
'.'.join(x[1:3]) for x in options if x[1] is not None])),
AstropyDeprecationWarning)
val = options[0][0]
try:
return self._validate_val(val)
except validate.ValidateError as e:
raise TypeError('Configuration value not valid: ' + e.args[0])
def _validate_val(self, val):
""" Validates the provided value based on cfgtype and returns the
type-cast value
throws the underlying configobj exception if it fails
"""
# note that this will normally use the *class* attribute `_validator`,
# but if some arcane reason is needed for making a special one for an
# instance or sub-class, it will be used
return self._validator.check(self.cfgtype, val)
# this dictionary stores the master copy of the ConfigObj objects for each
# root package
_cfgobjs = {}
def get_config_filename(packageormod=None):
"""
Get the filename of the config file associated with the given
package or module.
"""
cfg = get_config(packageormod)
while cfg.parent is not cfg:
cfg = cfg.parent
return cfg.filename
# This is used by testing to override the config file, so we can test
# with various config files that exercise different features of the
# config system.
_override_config_file = None
def get_config(packageormod=None, reload=False):
""" Gets the configuration object or section associated with a particular
package or module.
Parameters
----------
packageormod : str or None
The package for which to retrieve the configuration object. If a
string, it must be a valid package name, or if `None`, the package from
which this function is called will be used.
reload : bool, optional
Reload the file, even if we have it cached.
Returns
-------
cfgobj : ``configobj.ConfigObj`` or ``configobj.Section``
If the requested package is a base package, this will be the
``configobj.ConfigObj`` for that package, or if it is a subpackage or
module, it will return the relevant ``configobj.Section`` object.
Raises
------
RuntimeError
If ``packageormod`` is `None`, but the package this item is created
from cannot be determined.
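Examples
--------
A minimal sketch:
>>> from astropy.config import get_config
>>> sec = get_config('astropy.table')  # Section for the astropy.table subpackage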
"""
if packageormod is None:
packageormod = find_current_module(2)
if packageormod is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
packageormod = packageormod.__name__
packageormodspl = packageormod.split('.')
rootname = packageormodspl[0]
secname = '.'.join(packageormodspl[1:])
cobj = _cfgobjs.get(rootname, None)
if cobj is None or reload:
cfgfn = None
try:
# This feature is intended only for use by the unit tests
if _override_config_file is not None:
cfgfn = _override_config_file
else:
cfgfn = path.join(get_config_dir(), rootname + '.cfg')
cobj = configobj.ConfigObj(cfgfn, interpolation=False)
except OSError as e:
msg = ('Configuration defaults will be used due to ')
errstr = '' if len(e.args) < 1 else (':' + str(e.args[0]))
msg += e.__class__.__name__ + errstr
msg += ' on {0}'.format(cfgfn)
warn(ConfigurationMissingWarning(msg))
# This caches the object, so if the file becomes accessible, this
# function won't see it unless the module is reloaded
cobj = configobj.ConfigObj(interpolation=False)
_cfgobjs[rootname] = cobj
if secname: # not the root package
if secname not in cobj:
cobj[secname] = {}
return cobj[secname]
else:
return cobj
def reload_config(packageormod=None):
""" Reloads configuration settings from a configuration file for the root
package of the requested package/module.
This overwrites any changes that may have been made in `ConfigItem`
objects. This applies for any items that are based on this file, which
is determined by the *root* package of ``packageormod``
(e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'``
module).
Parameters
----------
packageormod : str or None
The package or module name - see `get_config` for details.
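Examples
--------
A minimal sketch:
>>> from astropy.config import reload_config
>>> reload_config('astropy')  # re-read astropy.cfg from disk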
"""
sec = get_config(packageormod, True)
# look for the section that is its own parent - that's the base object
while sec.parent is not sec:
sec = sec.parent
sec.reload()
def is_unedited_config_file(content, template_content=None):
"""
Determines if a config file can be safely replaced because it doesn't
actually contain any meaningful content.
To meet these criteria, the config file must be either:
- All comments or completely empty
- An exact match to a "legacy" version of the config file prior to
Astropy 0.4, when APE3 was implemented and the config file
contained commented-out values by default.
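For example, a file consisting only of comments counts as unedited:
>>> is_unedited_config_file('# comment only\n')
True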
"""
# We want to calculate the md5sum using universal line endings, so
# that even if the files had their line endings converted to \r\n
# on Windows, this will still work.
content = content.encode('latin-1')
# The jquery_url setting, present in 0.3.2 and later only, is
# effectively auto-generated by the build system, so we need to
# ignore it in the md5sum calculation for 0.3.2.
content = re.sub(br'\njquery_url\s*=\s*[^\n]+', b'', content)
# First determine if the config file has any effective content
buffer = io.BytesIO(content)
buffer.seek(0)
raw_cfg = configobj.ConfigObj(buffer, interpolation=True)
for v in raw_cfg.values():
if len(v):
break
else:
return True
# Now determine if it matches the md5sum of a known, unedited
# config file.
known_configs = set([
'7d4b4f1120304b286d71f205975b1286', # v0.3.2
'5df7e409425e5bfe7ed041513fda3288', # v0.3
'8355f99a01b3bdfd8761ef45d5d8b7e5', # v0.2
'4ea5a84de146dc3fcea2a5b93735e634' # v0.2.1, v0.2.2, v0.2.3, v0.2.4, v0.2.5
])
md5 = hashlib.md5()
md5.update(content)
digest = md5.hexdigest()
return digest in known_configs
# this is not in __all__ because it's not intended that a user uses it
def update_default_config(pkg, default_cfg_dir_or_fn, version=None):
"""
Checks if the configuration file for the specified package exists,
and if not, copy over the default configuration. If the
configuration file looks like it has already been edited, we do
not write over it, but instead write a file alongside it named
``pkg.version.cfg`` as a "template" for the user.
Parameters
----------
pkg : str
The package to be updated.
default_cfg_dir_or_fn : str
The filename or directory name where the default configuration file is.
If a directory name, ``'pkg.cfg'`` will be used in that directory.
version : str, optional
The current version of the given package. If not provided, it will
be obtained from ``pkg.__version__``.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
Raises
------
AttributeError
If the version number of the package could not be determined.
"""
if path.isdir(default_cfg_dir_or_fn):
default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + '.cfg')
else:
default_cfgfn = default_cfg_dir_or_fn
if not path.isfile(default_cfgfn):
# There is no template configuration file, which basically
# means the affiliated package is not using the configuration
# system, so just return.
return False
cfgfn = get_config(pkg).filename
with open(default_cfgfn, 'rt', encoding='latin-1') as fr:
template_content = fr.read()
doupdate = False
if cfgfn is not None:
if path.exists(cfgfn):
with open(cfgfn, 'rt', encoding='latin-1') as fd:
content = fd.read()
identical = (content == template_content)
if not identical:
doupdate = is_unedited_config_file(
content, template_content)
elif path.exists(path.dirname(cfgfn)):
doupdate = True
identical = False
if version is None:
version = resolve_name(pkg, '__version__')
# Don't install template files for dev versions, or we'll end up
# spamming `~/.astropy/config`.
if 'dev' not in version and cfgfn is not None:
template_path = path.join(
get_config_dir(), '{0}.{1}.cfg'.format(pkg, version))
needs_template = not path.exists(template_path)
else:
needs_template = False
if doupdate or needs_template:
if needs_template:
with open(template_path, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
# If we just installed a new template file and we can't
# update the main configuration file because it has user
# changes, display a warning.
if not identical and not doupdate:
warn(
"The configuration options in {0} {1} may have changed, "
"your configuration file was not updated in order to "
"preserve local changes. A new configuration template "
"has been saved to '{2}'.".format(
pkg, version, template_path),
ConfigurationChangedWarning)
if doupdate and not identical:
with open(cfgfn, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
return True
return False
|
6afb00688d8dfc94f66e040a788a20d49faa1491e10a166550c8296c609f863d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from functools import wraps
import os
import shutil
import sys
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
'set_temp_cache']
def _find_home():
""" Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
If the home directory cannot be located - usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
# First find the home directory - this is inspired by the scheme ipython
# uses to identify "home"
if os.name == 'posix':
# Linux, Unix, AIX, OS X
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find unix home directory to search for '
'astropy config dir')
elif os.name == 'nt': # This is for all modern Windows (NT or after)
if 'MSYSTEM' in os.environ and os.environ.get('HOME'):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = os.environ['HOME']
# Next try for a network home
elif 'HOMESHARE' in os.environ:
homedir = os.environ['HOMESHARE']
# See if there's a local home
elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
homedir = os.path.join(os.environ['HOMEDRIVE'],
os.environ['HOMEPATH'])
# Maybe a user profile?
elif 'USERPROFILE' in os.environ:
homedir = os.path.join(os.environ['USERPROFILE'])
else:
try:
import winreg as wreg
shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, 'Personal')[0]
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find windows home directory to '
'search for astropy config dir')
else:
# for other platforms, try HOME, although it probably isn't there
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find a home directory to search for '
'astropy config dir - are you on an unsupported '
'platform?')
return homedir
def get_config_dir(create=True):
"""
Determines the Astropy configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
configdir : str
The absolute path to the configuration directory.
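Examples
--------
A minimal sketch (the returned path depends on the platform and
environment):
>>> from astropy.config.paths import get_config_dir
>>> cfgdir = get_config_dir()  # e.g. ~/.astropy/config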
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, 'astropy')
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get('XDG_CONFIG_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('config', linkto))
def get_cache_dir():
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, 'astropy')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get('XDG_CACHE_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('cache', linkto))
class _SetTempPath:
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
return self._default_path_getter()
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
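Examples
--------
A minimal sketch, assuming the directory ``/tmp/astropy_cfg`` already
exists:
>>> from astropy.config.paths import set_temp_config
>>> with set_temp_config('/tmp/astropy_cfg'):
...     pass  # config lookups now resolve under /tmp/astropy_cfg/astropy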
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects
from .configuration import _cfgobjs
path = super().__enter__()
_cfgobjs.clear()
return path
def __exit__(self, *args):
from .configuration import _cfgobjs
super().__exit__(*args)
_cfgobjs.clear()
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_astropy_dir(dirnm, linkto):
innerdir = os.path.join(_find_home(), '.astropy')
maindir = os.path.join(_find_home(), '.astropy', dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
msg = 'Intended Astropy directory {0} is actually a file.'
raise OSError(msg.format(innerdir))
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (not sys.platform.startswith('win') and
linkto is not None and
not os.path.exists(linkto)):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
msg = 'Intended Astropy {0} directory {1} is actually a file.'
raise OSError(msg.format(dirnm, maindir))
return os.path.abspath(maindir)
|
4905249e1ddb7e8bee39ca2bc523786688c9b6d177aedc65752d6679e9d61048 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Distribution class and associated machinery.
"""
import numpy as np
from astropy import units as u
from astropy import stats
__all__ = ['Distribution']
# we set this by hand because the symbolic expression (below) requires scipy
# SMAD_SCALE_FACTOR = 1 / scipy.stats.norm.ppf(0.75)
SMAD_SCALE_FACTOR = 1.48260221850560203193936104071326553821563720703125
class Distribution:
"""
A scalar value or array values with associated uncertainty distribution.
This object will take its exact type from whatever the ``samples`` argument
is. In general this is expected to be an `~astropy.units.Quantity` or
`numpy.ndarray`, although anything compatible with `numpy.asanyarray` is
possible.
See also: http://docs.astropy.org/en/stable/uncertainty/
Parameters
----------
samples : array-like
The distribution, with sampling along the *leading* axis. If 1D, the
sole dimension is used as the sampling axis (i.e., it is a scalar
distribution).
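Examples
--------
A minimal sketch of a scalar distribution with 1000 samples:
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.uncertainty import Distribution
>>> distr = Distribution(np.random.normal(10., 1., 1000) * u.km)
>>> center, width = distr.pdf_mean, distr.pdf_std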
"""
_generated_subclasses = {}
def __new__(cls, samples):
if isinstance(samples, Distribution):
samples = samples.distribution
else:
samples = np.asanyarray(samples, order='C')
if samples.shape == ():
raise TypeError('Attempted to initialize a Distribution with a scalar')
new_dtype = np.dtype({'names': ['samples'],
'formats': [(samples.dtype, (samples.shape[-1],))]})
samples_cls = type(samples)
if not issubclass(samples_cls, Distribution):
# Make sure first letter is uppercase, but note that we can't use
# str.capitalize since that converts the rest of the name to lowercase.
new_name = samples_cls.__name__[0].upper() + samples_cls.__name__[1:] + cls.__name__
if new_name in cls._generated_subclasses:
new_cls = cls._generated_subclasses[new_name]
else:
new_cls = type(new_name, (cls, samples_cls),
{'_samples_cls': samples_cls})
cls._generated_subclasses[new_name] = new_cls
self = samples.view(dtype=new_dtype, type=new_cls)
# Get rid of trailing dimension of 1.
self.shape = samples.shape[:-1]
return self
@property
def distribution(self):
return self['samples']
def __getitem__(self, item):
result = super().__getitem__(item)
if item == 'samples':
return super(Distribution, result).view(self._samples_cls)
else:
return Distribution.__new__(self.__class__, result['samples'])
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
converted = []
outputs = kwargs.pop('out', None)
if outputs:
kwargs['out'] = tuple((output.distribution if
isinstance(output, Distribution)
else output) for output in outputs)
if method in {'reduce', 'accumulate', 'reduceat'}:
axis = kwargs.get('axis', None)
if axis is None:
assert isinstance(inputs[0], Distribution)
kwargs['axis'] = tuple(range(inputs[0].ndim))
for input_ in inputs:
if isinstance(input_, Distribution):
converted.append(input_.distribution)
else:
shape = getattr(input_, 'shape', ())
if shape:
converted.append(input_[..., np.newaxis])
else:
converted.append(input_)
results = getattr(ufunc, method)(*converted, **kwargs)
if not isinstance(results, tuple):
results = (results,)
if outputs is None:
outputs = (None,) * len(results)
finals = []
for result, output in zip(results, outputs):
if output is not None:
finals.append(output)
else:
if getattr(result, 'shape', False):
finals.append(Distribution(result))
else:
finals.append(result)
return finals if len(finals) > 1 else finals[0]
# Override view so that we stay a Distribution version of the new type.
def view(self, dtype=None, type=None):
if type is None:
if issubclass(dtype, np.ndarray):
type = dtype
dtype = None
else:
raise ValueError('Cannot set just dtype for a Distribution.')
result = self.distribution.view(dtype, type)
return Distribution(result)
def __repr__(self):
reprarr = repr(self.distribution)
if reprarr.endswith('>'):
firstspace = reprarr.find(' ')
reprarr = reprarr[firstspace+1:-1] # :-1] removes the ending '>'
return '<{} {} with n_samples={}>'.format(self.__class__.__name__,
reprarr, self.n_samples)
else: # numpy array-like
firstparen = reprarr.find('(')
reprarr = reprarr[firstparen:]
return '{}{} with n_samples={}'.format(self.__class__.__name__,
reprarr, self.n_samples)
def __str__(self):
distrstr = str(self.distribution)
toadd = ' with n_samples={}'.format(self.n_samples)
return distrstr + toadd
def _repr_latex_(self):
if hasattr(self.distribution, '_repr_latex_'):
superlatex = self.distribution._repr_latex_()
toadd = r', \; n_{{\rm samp}}={}'.format(self.n_samples)
return superlatex[:-1] + toadd + superlatex[-1]
else:
return None
@property
def n_samples(self):
"""
The number of samples of this distribution. A single `int`.
"""
return self.dtype['samples'].shape[0]
@property
def pdf_mean(self):
"""
The mean of this distribution.
"""
return self.distribution.mean(axis=-1)
@property
def pdf_std(self):
"""
The standard deviation of this distribution.
"""
return self.distribution.std(axis=-1)
@property
def pdf_var(self):
"""
The variance of this distribution.
"""
return self.distribution.var(axis=-1)
@property
def pdf_median(self):
"""
The median of this distribution.
"""
return np.median(self.distribution, axis=-1)
@property
def pdf_mad(self):
"""
The median absolute deviation of this distribution.
"""
return np.abs(self - self.pdf_median).pdf_median
@property
def pdf_smad(self):
"""
The median absolute deviation of this distribution rescaled to match the
standard deviation for a normal distribution.
"""
return self.pdf_mad * SMAD_SCALE_FACTOR
def pdf_percentiles(self, percentile, **kwargs):
"""
Compute percentiles of this Distribution.
Parameters
----------
percentile : float or array of floats or `~astropy.units.Quantity`
The desired percentiles of the distribution (i.e., on [0,100]).
`~astropy.units.Quantity` will be converted to percent, meaning
that a ``dimensionless_unscaled`` `~astropy.units.Quantity` will
be interpreted as a quantile.
Additional keywords are passed into `numpy.percentile`.
Returns
-------
percentiles : `~astropy.units.Quantity`
The ``percentile`` percentiles of this distribution.
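Examples
--------
A minimal sketch (assuming this class is importable as
`astropy.uncertainty.Distribution`)::
>>> import numpy as np
>>> from astropy.uncertainty import Distribution
>>> distr = Distribution(np.arange(10.))
>>> np.allclose(distr.pdf_percentiles([10, 50, 90]), [0.9, 4.5, 8.1])
True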
"""
percentile = u.Quantity(percentile, u.percent).value
percs = np.percentile(self.distribution, percentile, axis=-1, **kwargs)
# numpy.percentile strips units for unclear reasons, so we have to make
# a new object with units
if hasattr(self.distribution, '_new_view'):
return self.distribution._new_view(percs)
else:
return percs
def pdf_histogram(self, **kwargs):
"""
Compute histogram over the samples in the distribution.
Parameters
----------
All keyword arguments are passed into `astropy.stats.histogram`. Note
that some of these options may not be valid for some multidimensional
distributions.
Returns
-------
hist : array
The values of the histogram. Trailing dimension is the histogram
dimension.
bin_edges : array of dtype float
The bin edges (of length ``length(hist)+1``). The trailing dimension
is the bin edge dimension.
"""
distr = self.distribution
raveled_distr = distr.reshape(distr.size//distr.shape[-1], distr.shape[-1])
nhists = []
bin_edges = []
for d in raveled_distr:
nhist, bin_edge = stats.histogram(d, **kwargs)
nhists.append(nhist)
bin_edges.append(bin_edge)
nhists = np.array(nhists)
nh_shape = self.shape + (nhists.size//self.size,)
bin_edges = np.array(bin_edges)
be_shape = self.shape + (bin_edges.size//self.size,)
return nhists.reshape(nh_shape), bin_edges.reshape(be_shape)
|
d484791f5a84387b2f96a702aac42c7b9e755a142ddfbe02bfed1ec8a7eb3b20 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in distribution-creation functions.
"""
from warnings import warn
import numpy as np
from astropy import units as u
from .core import Distribution
__all__ = ['normal', 'poisson', 'uniform']
def normal(center, *, std=None, var=None, ivar=None, n_samples,
cls=Distribution, **kwargs):
"""
Create a Gaussian/normal distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center of this distribution
std : `~astropy.units.Quantity` or `None`
The standard deviation/σ of this distribution. Shape must match and unit
must be compatible with ``center``, or be `None` (if ``var`` or ``ivar``
are set).
var : `~astropy.units.Quantity` or `None`
The variance of this distribution. Shape must match and unit must be
compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set).
ivar : `~astropy.units.Quantity` or `None`
The inverse variance of this distribution. Shape must match and unit
must be compatible with ``center``, or be `None` (if ``std`` or ``var``
are set).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled Gaussian distribution.
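Examples
--------
A minimal sketch (assuming this module is importable as
`astropy.uncertainty`)::
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.uncertainty import normal
>>> np.random.seed(12345)  # only to make the example reproducible
>>> distr = normal(5 * u.kpc, std=0.5 * u.kpc, n_samples=10000)
>>> distr.n_samples
10000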
"""
center = np.asanyarray(center)
if var is not None:
if std is None:
std = np.asanyarray(var)**0.5
else:
raise ValueError('normal cannot take both std and var')
if ivar is not None:
if std is None:
std = np.asanyarray(ivar)**-0.5
else:
raise ValueError('normal cannot take both ivar and '
'std or var')
if std is None:
raise ValueError('normal requires one of std, var, or ivar')
else:
std = np.asanyarray(std)
randshape = np.broadcast(std, center).shape + (n_samples,)
samples = center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis]
return cls(samples, **kwargs)
COUNT_UNITS = (u.count, u.electron, u.dimensionless_unscaled, u.chan, u.bin, u.vox, u.bit, u.byte)
def poisson(center, n_samples, cls=Distribution, **kwargs):
"""
Create a Poisson distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center value of this distribution (i.e., λ).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled Poisson distribution.
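Examples
--------
A minimal sketch (assuming this module is importable as
`astropy.uncertainty`)::
>>> from astropy import units as u
>>> from astropy.uncertainty import poisson
>>> distr = poisson(50 * u.count, n_samples=2000)
>>> distr.n_samples
2000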
"""
# we convert to arrays because np.random.poisson has trouble with quantities
has_unit = False
if hasattr(center, 'unit'):
has_unit = True
poissonarr = np.asanyarray(center.value)
else:
poissonarr = np.asanyarray(center)
randshape = poissonarr.shape + (n_samples,)
samples = np.random.poisson(poissonarr[..., np.newaxis], randshape)
if has_unit:
if center.unit == u.adu:
warn('ADUs were provided to poisson. ADUs are not strictly count '
'units because they need the gain to be applied. It is '
'recommended you apply the gain to convert to e.g. electrons.')
elif center.unit not in COUNT_UNITS:
warn('Unit {} was provided to poisson, which is not one of {}, '
'and therefore suspect as a "counting" unit. Ensure you mean '
'to use Poisson statistics.'.format(center.unit, COUNT_UNITS))
# re-attach the unit
samples = samples * center.unit
return cls(samples, **kwargs)
def uniform(*, lower=None, upper=None, center=None, width=None, n_samples,
cls=Distribution, **kwargs):
"""
Create a uniform distribution from the lower and upper bounds.
Note that this function requires keywords to be explicit, and requires
either ``lower``/``upper`` or ``center``/``width``.
Parameters
----------
lower : array-like
The lower edge of this distribution. If a `~astropy.units.Quantity`, the
distribution will have the same units as ``lower``.
upper : `~astropy.units.Quantity`
The upper edge of this distribution. Must match shape and if a
`~astropy.units.Quantity` must have compatible units with ``lower``.
center : array-like
The center value of the distribution. Cannot be provided at the same
time as ``lower``/``upper``.
width : array-like
The width of the distribution. Must have the same shape and compatible
units with ``center`` (if any).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled uniform distribution.
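Examples
--------
A minimal sketch (assuming this module is importable as
`astropy.uncertainty`)::
>>> from astropy.uncertainty import uniform
>>> distr = uniform(lower=2., upper=4., n_samples=1000)
>>> distr.n_samples
1000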
"""
if center is None and width is None:
lower = np.asanyarray(lower)
upper = np.asanyarray(upper)
if lower.shape != upper.shape:
raise ValueError('lower and upper must have consistent shapes')
elif upper is None and lower is None:
center = np.asanyarray(center)
width = np.asanyarray(width)
lower = center - width/2
upper = center + width/2
else:
raise ValueError('either upper/lower or center/width must be given '
'to uniform - other combinations are not valid')
newshape = lower.shape + (n_samples,)
if lower.shape == tuple() and upper.shape == tuple():
width = upper - lower # scalar
else:
# use an ellipsis so inputs of any dimensionality gain the
# trailing sample axis and broadcast correctly
width = (upper - lower)[..., np.newaxis]
lower = lower[..., np.newaxis]
samples = lower + width * np.random.uniform(size=newshape)
return cls(samples, **kwargs)
|
c8a90cce8ad8facbdf36a1c7ff8464dd2a530c9ea4df9009dbdde2383010aff4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sub-package contains classes and functions for creating distributions that
work similarly to `~astropy.units.Quantity` or array objects, but can propagate
uncertainties.
"""
from .core import *
from .distributions import *
|
cba1433a6b06778d8622a4e333821d974d788a467b25eaa2a9cc40b515c7ef65 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from astropy.units import Quantity
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
# Check pkg_resources exists
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
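A sketch of the intended usage on a custom fitter (``MyFitter`` is
hypothetical)::
    class MyFitter(metaclass=_FitterMeta):
        @fitter_unit_support
        def __call__(self, model, x, y, z=None, **kwargs):
            ...  # fit using plain (unitless) arrays here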
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters
# are in the right unit system)
model = model.without_units_for_data(x=x, y=y, z=z)
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(x=x, y=y, z=z)
return model_new
else:
raise NotImplementedError("This model does not support being fit to data with units")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
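A sketch of a custom subclass, following the pattern of the concrete
fitters below (``MyFitter`` is hypothetical)::
    class MyFitter(Fitter):
        def __init__(self):
            super().__init__(optimizer=Simplex, statistic=leastsquare)
        def __call__(self, model, x, y, **kwargs):
            ...  # prepare arguments and call self._opt_method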
"""
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
_fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have an ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
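Examples
--------
A minimal sketch (assumes `~astropy.modeling.models.Linear1D` is
available)::
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(0., 10., 50)
>>> y = 3. * x + 1.
>>> fitter = fitting.LinearLSQFitter()
>>> fitted = fitter(models.Linear1D(), x, y)
>>> np.allclose(fitted.parameters, [3., 1.])
True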
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like (optional)
Input coordinates.
If the dependent (``y`` or ``z``) co-ordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
co-ordinate grids differ.
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
_, fitparam_indices = _model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x)
else:
lhs = model_copy.fit_deriv(x, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices, x=x, y=y)
else:
lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
else:
rhs = z.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if lhs.ndim > 2:
raise ValueError('{0} gives unsupported >2D derivative matrix for '
'this x/y'.format(type(model_copy).__name__))
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input co-ordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(x) != len(weights):
raise ValueError("x and weights should have the same length")
if rhs.ndim == 2:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original dependent
# variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
if rcond is None:
rcond = len(x) * np.finfo(x.dtype).eps
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if len(model_copy) == 1 or not masked:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
else:
# Where fitting multiple models with masked pixels, initialize an
# empty array of coefficients and populate it one model at a time.
# The shape matches the number of coefficients from the Vandermonde
# matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[-1:] + rhs.shape[-1:], dtype=rhs.dtype)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_rhs, model_lacoef in zip(rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask
model_lhs = lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef = (lacoef.T / scl).T
self.fit_info['params'] = lacoef
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if hasattr(model_copy, '_order') and rank != model_copy._order:
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
_fitter_to_model_params(model_copy, lacoef.flatten())
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a number of iterations ``niter``, outliers are removed
and fitting is performed for each iteration.
Parameters
----------
fitter : An Astropy fitter
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : function
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int (optional)
Number of iterations.
outlier_kwargs : dict (optional)
Keyword arguments for outlier_func.
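Examples
--------
A minimal sketch (assumes `~astropy.stats.sigma_clip` as the outlier
function)::
>>> import numpy as np
>>> from astropy.stats import sigma_clip
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(0., 10., 50)
>>> y = 2. * x + 1.
>>> y[10] += 100.  # inject a single outlier
>>> fit = fitting.FittingWithOutlierRemoval(
...     fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0)
>>> fitted, mask = fit(models.Linear1D(), x, y)
>>> bool(mask[10])  # the outlier is flagged
True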
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
def __str__(self):
return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
("\nOutlier func. args.: {3}"))\
.format(self.fitter__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __repr__(self):
return ("{0}(fitter: {1}, outlier_func: {2}," +
" niter: {3}, outlier_kwargs: {4})")\
.format(self.__class__.__name__,
self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like (optional)
Data measurements (2D case).
weights : array-like (optional)
Weights to be passed to the fitter.
kwargs : dict (optional)
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y co-ordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if not hasattr(self.fitter, 'supports_masked_input') or \
self.fitter.supports_masked_input is not True:
raise ValueError("{0} cannot fit model sets with masked "
"values".format(type(self.fitter).__name__))
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input co-ordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = x,
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if 'axis' not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs['axis'] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
# Perform the iterative fitting:
# TODO: add a stopping criterion when results aren't changing?
for n in range(self.niter):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop('axis', None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask,
model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
model_vals_T):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn('outlier_func did not accept axis argument; '
'reverted to slow loop over models.',
AstropyUserWarning)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights, **kwargs)
else:
fitted_model = self.fitter(fitted_model, *coords,
filtered_data,
weights=filtered_weights, **kwargs)
return fitted_model, filtered_data.mask
class LevMarLSQFitter(metaclass=_FitterMeta):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
In addition, one extra element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
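Examples
--------
A minimal sketch (requires `scipy`; assumes
`~astropy.modeling.models.Gaussian1D` is available)::
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(-5., 5., 200)
>>> y = 2. * np.exp(-0.5 * (x / 1.5) ** 2)
>>> fitter = fitting.LevMarLSQFitter()
>>> g = fitter(models.Gaussian1D(amplitude=1., mean=0., stddev=1.), x, y)
>>> np.allclose(g.parameters, [2., 0., 1.5], atol=1e-5)
True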
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self):
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
_fitter_to_model_params(model, fps)
meas = args[-1]
if weights is None:
return np.ravel(model(*args[2: -1]) - meas)
else:
return np.ravel(weights * (model(*args[2: -1]) - meas))
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
from scipy import optimize
model_copy = _validate_model(model, self.supported_constraints)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
if model_copy.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _ = _model_to_fit_params(model_copy)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
_fitter_to_model_params(model_copy, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
return model_copy
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
_fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in (
np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
else:
return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
class SLSQPLSQFitter(Fitter):
"""
SLSQP optimization algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
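Examples
--------
A minimal sketch (requires `scipy`), fitting two Gaussians that share a
common ``stddev``::
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> g1 = models.Gaussian1D(1., 0., 0.5)
>>> g2 = models.Gaussian1D(2., 1., 0.5)
>>> jf = fitting.JointFitter([g1, g2],
...                          {g1: ['stddev'], g2: ['stddev']}, [0.5])
>>> x = np.linspace(-3., 3., 100)
>>> jf(x, g1(x), x, g2(x))  # doctest: +SKIP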
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self._model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def _model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
the fitted parameters - result of one iteration of the
fitting algorithm
args : tuple
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
# separate out each model's individually fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
# unpack the input coordinates (margs[:-1]) so that models with
# more than one input are evaluated correctly
modelfit = model.evaluate(*margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError("Expected >1 models, {} is given".format(
len(self.models)))
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
"{} is given".format(len(self.jointparams.keys())))
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError("{} parameter(s) provided but {} expected".format(
len(self.jointparams[j]), len(self.initvals)))
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
# each model contributes n_inputs coordinate arrays plus one array
# of measured values
n_args_expected = sum(d + 1 for d in self.modeldims)
if len(args) != n_args_expected:
raise ValueError("Expected {} coordinates in args but {} provided"
.format(n_args_expected, len(args)))
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y array is expected to equal "
"the number of parameter sets)")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
"""
_, fit_param_indices = _model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None):
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
model.parameters[slice_] = values
offset += size
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression). It might
# be better to change this at some point.
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
model.parameters[slice_] = value
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
else:
return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
# for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requirement
of it being merged into astropy's core.
Parameters
----------
entry_points : a list of `~pkg_resources.EntryPoint`
entry_points are objects which encapsulate
importable objects and are defined on the
installation of a package.
Notes
-----
An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_.
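Examples
--------
A sketch of how a third-party package might advertise a fitter in its
``setup.py`` (the package and class names here are hypothetical)::
    entry_points={'astropy.modeling': [
        'MyFitter = mypackage.fitters:MyFitter']}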
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning('{type} error occurred in entry '
'point {name}.' .format(type=type(e).__name__, name=name)))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to be a '
'Class.' .format(name)))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to extend '
'astropy.modeling.Fitter' .format(name)))
# this is so fitting doesn't choke if pkg_resources doesn't exist
if HAS_PKG:
populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
|
251de86c7203efa79f2956c8801d88a4b5774868c02e02ed5d70e5ec1cadae09 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines two classes that deal with parameters.
It is unlikely users will need to work with these classes directly, unless they
define their own models.
"""
import functools
import numbers
import types
import operator
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils import isiterable, OrderedDescriptor
from .utils import array_repr_oneline
from .utils import get_inputs_and_params
__all__ = ['Parameter', 'InputParameterError', 'ParameterError']
class ParameterError(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
class InputParameterError(ValueError, ParameterError):
"""Used for incorrect input parameter values and definitions."""
class ParameterDefinitionError(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array"""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
"Parameter of {0} could not be converted to "
"float".format(type(value)))
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
elif isinstance(value, bool):
# bool must be checked before numbers.Number, since bool is a
# subclass of int and would otherwise be converted silently
raise InputParameterError(
"Expected parameter to be of numerical type, not boolean")
elif isinstance(value, (numbers.Number, np.number)):
value = float(value)
else:
raise InputParameterError(
"Don't know how to convert parameter of {0} to "
"float".format(type(value)))
return value
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self._model is None:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self._model is None:
if op is operator.lt:
# Because OrderedDescriptor uses __lt__ to work, we need to
# call the super method, but only when not bound to an instance
# anyway
return super(self.__class__, self).__lt__(val)
else:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self._model is None:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
class Parameter(OrderedDescriptor):
"""
Wraps individual parameters.
This class represents a model's parameter (in a somewhat broad sense). It
acts as both a descriptor that can be assigned to a class attribute to
describe the parameters accepted by an individual model (this is called an
"unbound parameter"), or it can act as a proxy for the parameter values on
an individual model instance (called a "bound parameter").
Parameter instances never store the actual value of the parameter directly.
Rather, each instance of a model stores its own parameter values
in an array. A *bound* Parameter simply wraps the value in a Parameter
proxy which provides some additional information about the parameter such
as its constraints. In other words, this is a high-level interface to a
model's adjustable parameter values.
*Unbound* Parameters are not associated with any specific model instance,
and are merely used by model classes to determine the names of their
parameters and other information about each parameter such as their default
values and default constraints.
See :ref:`modeling-parameters` for more details.
Parameters
----------
name : str
parameter name
.. warning::
The fact that `Parameter` accepts ``name`` as an argument is an
implementation detail, and should not be used directly. When
defining a new `Model` class, parameter names are always
automatically defined by the class attribute they're assigned to.
description : str
parameter description
default : float or array
default value to use for this parameter
unit : `~astropy.units.Unit`
if specified, the parameter will be in these units, and when the
parameter is updated in future, it should be set to a
:class:`~astropy.units.Quantity` that has equivalent units.
getter : callable
a function that wraps the raw (internal) value of the parameter
when returning the value through the parameter proxy (eg. a
parameter may be stored internally as radians but returned to the
user as degrees)
setter : callable
a function that wraps any values assigned to this parameter; should
be the inverse of getter
fixed : bool
if True the parameter is not varied during fitting
tied : callable or False
if callable is supplied it provides a way to link the value of this
parameter to another parameter (or some other arbitrary function)
min : float
the lower bound of a parameter
max : float
the upper bound of a parameter
bounds : tuple
specify min and max as a single tuple--bounds may not be specified
simultaneously with min or max
model : `Model` instance
        binds the `Parameter` instance to a specific model upon
instantiation; this should only be used internally for creating bound
Parameters, and should not be used for `Parameter` descriptors defined
as class attributes
"""
constraints = ('fixed', 'tied', 'bounds', 'prior', 'posterior')
"""
Types of constraints a parameter can have. Excludes 'min' and 'max'
which are just aliases for the first and second elements of the 'bounds'
constraint (which is represented as a 2-tuple). 'prior' and 'posterior'
are available for use by user fitters but are not used by any built-in
fitters as of this writing.
"""
# Settings for OrderedDescriptor
_class_attribute_ = '_parameters_'
_name_attribute_ = '_name'
def __init__(self, name='', description='', default=None, unit=None,
getter=None, setter=None, fixed=False, tied=False, min=None,
max=None, bounds=None, prior=None, posterior=None, model=None):
super().__init__()
self._name = name
self.__doc__ = self._description = description.strip()
# We only need to perform this check on unbound parameters
if model is None and isinstance(default, Quantity):
if unit is not None and not unit.is_equivalent(default.unit):
raise ParameterDefinitionError(
"parameter default {0} does not have units equivalent to "
"the required unit {1}".format(default, unit))
unit = default.unit
default = default.value
self._default = default
self._unit = unit
# NOTE: These are *default* constraints--on model instances constraints
# are taken from the model if set, otherwise the defaults set here are
# used
if bounds is not None:
if min is not None or max is not None:
raise ValueError(
                    'bounds may not be specified simultaneously with min '
                    'or max when instantiating Parameter {0}'.format(name))
else:
bounds = (min, max)
self._fixed = fixed
self._tied = tied
self._bounds = bounds
self._posterior = posterior
self._prior = prior
self._order = None
self._model = None
# The getter/setter functions take one or two arguments: The first
# argument is always the value itself (either the value returned or the
# value being set). The second argument is optional, but if present
# will contain a reference to the model object tied to a parameter (if
# it exists)
self._getter = self._create_value_wrapper(getter, None)
self._setter = self._create_value_wrapper(setter, None)
self._validator = None
        # Only Parameters declared as class-level descriptors require
        # an ordering ID
if model is not None:
self._bind(model)
def __get__(self, obj, objtype):
if obj is None:
return self
# All of the Parameter.__init__ work should already have been done for
# the class-level descriptor; we can skip that stuff and just copy the
# existing __dict__ and then bind to the model instance
parameter = self.__class__.__new__(self.__class__)
parameter.__dict__.update(self.__dict__)
parameter._bind(obj)
return parameter
def __set__(self, obj, value):
value = _tofloat(value)
# Check that units are compatible with default or units already set
param_unit = obj._param_metrics[self.name]['orig_unit']
if param_unit is None:
if isinstance(value, Quantity):
obj._param_metrics[self.name]['orig_unit'] = value.unit
else:
if not isinstance(value, Quantity):
raise UnitsError("The '{0}' parameter should be given as a "
"Quantity because it was originally initialized "
"as a Quantity".format(self._name))
else:
# We need to make sure we update the unit because the units are
# then dropped from the value below.
obj._param_metrics[self.name]['orig_unit'] = value.unit
# Call the validator before the setter
if self._validator is not None:
self._validator(obj, value)
if self._setter is not None:
setter = self._create_value_wrapper(self._setter, obj)
if self.unit is not None:
value = setter(value * self.unit).value
else:
value = setter(value)
self._set_model_value(obj, value)
def __len__(self):
if self._model is None:
raise TypeError('Parameter definitions do not have a length.')
return len(self._model)
def __getitem__(self, key):
value = self.value
if len(self._model) == 1:
# Wrap the value in a list so that getitem can work for sensible
# indices like [0] and [-1]
value = [value]
return value[key]
def __setitem__(self, key, value):
# Get the existing value and check whether it even makes sense to
# apply this index
oldvalue = self.value
n_models = len(self._model)
# if n_models == 1:
# # Convert the single-dimension value to a list to allow some slices
# # that would be compatible with a length-1 array like [:] and [0:]
# oldvalue = [oldvalue]
if isinstance(key, slice):
if len(oldvalue[key]) == 0:
raise InputParameterError(
"Slice assignment outside the parameter dimensions for "
"'{0}'".format(self.name))
for idx, val in zip(range(*key.indices(len(self))), value):
self.__setitem__(idx, val)
else:
try:
oldvalue[key] = value
except IndexError:
raise InputParameterError(
"Input dimension {0} invalid for {1!r} parameter with "
"dimension {2}".format(key, self.name, n_models))
def __repr__(self):
args = "'{0}'".format(self._name)
if self._model is None:
if self._default is not None:
args += ', default={0}'.format(self._default)
else:
args += ', value={0}'.format(self.value)
if self.unit is not None:
args += ', unit={0}'.format(self.unit)
for cons in self.constraints:
val = getattr(self, cons)
if val not in (None, False, (None, None)):
# Maybe non-obvious, but False is the default for the fixed and
# tied constraints
args += ', {0}={1}'.format(cons, val)
return "{0}({1})".format(self.__class__.__name__, args)
@property
def name(self):
"""Parameter name"""
return self._name
@property
def default(self):
"""Parameter default value"""
if (self._model is None or self._default is None or
len(self._model) == 1):
return self._default
        # Otherwise the model we are providing for has more than one parameter
        # set, so ensure that the default is repeated the correct number of
# times along the model_set_axis if necessary
n_models = len(self._model)
model_set_axis = self._model._model_set_axis
default = self._default
new_shape = (np.shape(default) +
(1,) * (model_set_axis + 1 - np.ndim(default)))
default = np.reshape(default, new_shape)
# Now roll the new axis into its correct position if necessary
default = np.rollaxis(default, -1, model_set_axis)
# Finally repeat the last newly-added axis to match n_models
default = np.repeat(default, n_models, axis=-1)
# NOTE: Regardless of what order the last two steps are performed in,
# the resulting array will *look* the same, but only if the repeat is
# performed last will it result in a *contiguous* array
return default
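    # A small numpy sketch of the broadcasting above (illustrative only): for
    # a scalar default of 1.0, two models, and model_set_axis=0,
    #
    #     >>> d = np.reshape(1.0, (1,))     # add the new model axis
    #     >>> d = np.rollaxis(d, -1, 0)     # move it to the model_set_axis
    #     >>> np.repeat(d, 2, axis=-1)      # repeat once per model
    #     array([1., 1.])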
@property
def value(self):
"""The unadorned value proxied by this parameter."""
if self._model is None:
raise AttributeError('Parameter definition does not have a value')
value = self._get_model_value(self._model)
if self._getter is None:
return value
else:
raw_unit = self._model._param_metrics[self.name]['raw_unit']
orig_unit = self._model._param_metrics[self.name]['orig_unit']
if raw_unit is not None:
return np.float64(self._getter(value, raw_unit, orig_unit).value)
else:
return self._getter(value)
@value.setter
def value(self, value):
if self._model is None:
raise AttributeError('Cannot set a value on a parameter '
'definition')
        if isinstance(value, Quantity):
            raise TypeError("The .value property on parameters should be set to "
                            "unitless values, not Quantity objects. To set a "
                            "parameter to a quantity simply set the parameter "
                            "directly without using .value")
        if self._setter is not None:
            # Apply the setter and keep its return value--previously the
            # result was assigned to an unused local and silently dropped
            value = self._setter(value)
        self._set_model_value(self._model, value)
@property
def unit(self):
"""
The unit attached to this parameter, if any.
On unbound parameters (i.e. parameters accessed through the
model class, rather than a model instance) this is the required/
default unit for the parameter.
"""
if self._model is None:
return self._unit
else:
# orig_unit may be undefined early on in model instantiation
return self._model._param_metrics[self.name].get('orig_unit',
self._unit)
@unit.setter
def unit(self, unit):
self._set_unit(unit)
def _set_unit(self, unit, force=False):
if self._model is None:
raise AttributeError('Cannot set unit on a parameter definition')
orig_unit = self._model._param_metrics[self.name]['orig_unit']
if force:
self._model._param_metrics[self.name]['orig_unit'] = unit
else:
if orig_unit is None:
raise ValueError('Cannot attach units to parameters that were '
'not initially specified with units')
else:
raise ValueError('Cannot change the unit attribute directly, '
'instead change the parameter to a new quantity')
@property
def quantity(self):
"""
This parameter, as a :class:`~astropy.units.Quantity` instance.
"""
if self.unit is not None:
return self.value * self.unit
else:
return None
@quantity.setter
def quantity(self, quantity):
if not isinstance(quantity, Quantity):
raise TypeError("The .quantity attribute should be set to a Quantity object")
self.value = quantity.value
self._set_unit(quantity.unit, force=True)
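    # Usage sketch (illustrative; uses the real astropy Gaussian1D model and
    # astropy.units)::
    #
    #     >>> from astropy import units as u
    #     >>> from astropy.modeling.models import Gaussian1D
    #     >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=0 * u.um, stddev=1 * u.um)
    #     >>> g.mean.quantity
    #     <Quantity 0. um>
    #     >>> g.mean.quantity = 5 * u.um    # value and unit updated together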
@property
def shape(self):
"""The shape of this parameter's value array."""
if self._model is None:
raise AttributeError('Parameter definition does not have a '
'shape.')
shape = self._model._param_metrics[self._name]['shape']
        if len(self._model) > 1:
            # If we are dealing with a model *set* the shape is the shape of
            # the parameter within a single model in the set. When a model
            # set is initialized, the parameter array gains an extra axis at
            # model_set_axis whose length is the number of models; removing
            # that axis recovers the shape of the parameter in a single
            # model.
            model_axis = self._model._model_set_axis
            if model_axis < 0:
                model_axis = len(shape) + model_axis
            shape = shape[:model_axis] + shape[model_axis + 1:]
        return shape
@property
def size(self):
"""The size of this parameter's value array."""
# TODO: Rather than using self.value this could be determined from the
# size of the parameter in _param_metrics
return np.size(self.value)
@property
def prior(self):
if self._model is not None:
prior = self._model._constraints['prior']
return prior.get(self._name, self._prior)
else:
return self._prior
@prior.setter
def prior(self, val):
if self._model is not None:
self._model._constraints['prior'][self._name] = val
else:
raise AttributeError("can't set attribute 'prior' on Parameter "
"definition")
@property
def posterior(self):
if self._model is not None:
posterior = self._model._constraints['posterior']
return posterior.get(self._name, self._posterior)
else:
return self._posterior
@posterior.setter
def posterior(self, val):
if self._model is not None:
self._model._constraints['posterior'][self._name] = val
else:
raise AttributeError("can't set attribute 'posterior' on Parameter "
"definition")
@property
def fixed(self):
"""
Boolean indicating if the parameter is kept fixed during fitting.
"""
if self._model is not None:
fixed = self._model._constraints['fixed']
return fixed.get(self._name, self._fixed)
else:
return self._fixed
@fixed.setter
def fixed(self, value):
"""Fix a parameter"""
if self._model is not None:
if not isinstance(value, bool):
raise TypeError("Fixed can be True or False")
self._model._constraints['fixed'][self._name] = value
else:
raise AttributeError("can't set attribute 'fixed' on Parameter "
"definition")
@property
def tied(self):
"""
Indicates that this parameter is linked to another one.
A callable which provides the relationship of the two parameters.
"""
if self._model is not None:
tied = self._model._constraints['tied']
return tied.get(self._name, self._tied)
else:
return self._tied
@tied.setter
def tied(self, value):
"""Tie a parameter"""
if self._model is not None:
if not callable(value) and value not in (False, None):
raise TypeError("Tied must be a callable")
self._model._constraints['tied'][self._name] = value
else:
raise AttributeError("can't set attribute 'tied' on Parameter "
"definition")
@property
def bounds(self):
"""The minimum and maximum values of a parameter as a tuple"""
if self._model is not None:
bounds = self._model._constraints['bounds']
return bounds.get(self._name, self._bounds)
else:
return self._bounds
@bounds.setter
def bounds(self, value):
"""Set the minimum and maximum values of a parameter from a tuple"""
if self._model is not None:
_min, _max = value
if _min is not None:
if not isinstance(_min, numbers.Number):
raise TypeError("Min value must be a number")
_min = float(_min)
if _max is not None:
if not isinstance(_max, numbers.Number):
raise TypeError("Max value must be a number")
_max = float(_max)
            bounds = self._model._constraints.setdefault('bounds', {})
            bounds[self._name] = (_min, _max)
else:
raise AttributeError("can't set attribute 'bounds' on Parameter "
"definition")
@property
def min(self):
"""A value used as a lower bound when fitting a parameter"""
return self.bounds[0]
@min.setter
def min(self, value):
"""Set a minimum value of a parameter"""
if self._model is not None:
self.bounds = (value, self.max)
else:
raise AttributeError("can't set attribute 'min' on Parameter "
"definition")
@property
def max(self):
"""A value used as an upper bound when fitting a parameter"""
return self.bounds[1]
@max.setter
def max(self, value):
"""Set a maximum value of a parameter."""
if self._model is not None:
self.bounds = (self.min, value)
else:
raise AttributeError("can't set attribute 'max' on Parameter "
"definition")
@property
def validator(self):
"""
Used as a decorator to set the validator method for a `Parameter`.
The validator method validates any value set for that parameter.
It takes two arguments--``self``, which refers to the `Model`
instance (remember, this is a method defined on a `Model`), and
the value being set for this parameter. The validator method's
return value is ignored, but it may raise an exception if the value
set on the parameter is invalid (typically an `InputParameterError`
should be raised, though this is not currently a requirement).
The decorator *returns* the `Parameter` instance that the validator
is set on, so the underlying validator method should have the same
name as the `Parameter` itself (think of this as analogous to
``property.setter``). For example::
>>> from astropy.modeling import Fittable1DModel
>>> class TestModel(Fittable1DModel):
... a = Parameter()
... b = Parameter()
...
... @a.validator
... def a(self, value):
... # Remember, the value can be an array
... if np.any(value < self.b):
... raise InputParameterError(
... "parameter 'a' must be greater than or equal "
... "to parameter 'b'")
...
... @staticmethod
... def evaluate(x, a, b):
... return a * x + b
...
>>> m = TestModel(a=1, b=2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
>>> m = TestModel(a=2, b=2)
>>> m.a = 0 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
On bound parameters this property returns the validator method itself,
        as a bound method on the `Parameter`. This is often less useful, but
it allows validating a parameter value without setting that parameter::
>>> m.a.validator(42) # Passes
>>> m.a.validator(-42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
"""
if self._model is None:
# For unbound parameters return the validator setter
def validator(func, self=self):
self._validator = func
return self
return validator
else:
# Return the validator method, bound to the Parameter instance with
# the name "validator"
def validator(self, value):
if self._validator is not None:
return self._validator(self._model, value)
return types.MethodType(validator, self)
def copy(self, name=None, description=None, default=None, unit=None,
getter=None, setter=None, fixed=False, tied=False, min=None,
max=None, bounds=None, prior=None, posterior=None):
"""
Make a copy of this `Parameter`, overriding any of its core attributes
in the process (or an exact copy).
The arguments to this method are the same as those for the `Parameter`
initializer. This simply returns a new `Parameter` instance with any
or all of the attributes overridden, and so returns the equivalent of:
.. code:: python
Parameter(self.name, self.description, ...)
"""
kwargs = locals().copy()
del kwargs['self']
for key, value in kwargs.items():
if value is None:
                # Annoying special cases for min/max, which are just aliases
                # for the components of bounds
if key in ('min', 'max'):
continue
else:
if hasattr(self, key):
value = getattr(self, key)
elif hasattr(self, '_' + key):
value = getattr(self, '_' + key)
kwargs[key] = value
return self.__class__(**kwargs)
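    # Copy sketch (illustrative): make an unbound copy of a Parameter with a
    # different default, leaving the original untouched::
    #
    #     >>> p = Parameter('p', default=1.0, min=0)
    #     >>> q = p.copy(name='q', default=2.0)
    #     >>> q.default, q.bounds
    #     (2.0, (0, None))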
@property
def _raw_value(self):
"""
Currently for internal use only.
Like Parameter.value but does not pass the result through
Parameter.getter. By design this should only be used from bound
parameters.
        This will probably be removed or reworked at some point in the
process of rethinking how parameter values are stored/updated.
"""
return self._get_model_value(self._model)
def _bind(self, model):
"""
Bind the `Parameter` to a specific `Model` instance; don't use this
directly on *unbound* parameters, i.e. `Parameter` descriptors that
are defined in class bodies.
"""
self._model = model
self._getter = self._create_value_wrapper(self._getter, model)
self._setter = self._create_value_wrapper(self._setter, model)
# TODO: These methods should probably be moved to the Model class, since it
# has entirely to do with details of how the model stores parameters.
# Parameter should just act as a user front-end to this.
def _get_model_value(self, model):
"""
This method implements how to retrieve the value of this parameter from
the model instance. See also `Parameter._set_model_value`.
These methods take an explicit model argument rather than using
self._model so that they can be used from unbound `Parameter`
instances.
"""
if not hasattr(model, '_parameters'):
# The _parameters array hasn't been initialized yet; just translate
# this to an AttributeError
raise AttributeError(self._name)
# Use the _param_metrics to extract the parameter value from the
# _parameters array
param_metrics = model._param_metrics[self._name]
param_slice = param_metrics['slice']
param_shape = param_metrics['shape']
value = model._parameters[param_slice]
if param_shape:
value = value.reshape(param_shape)
else:
value = value[0]
return value
def _set_model_value(self, model, value):
"""
This method implements how to store the value of a parameter on the
model instance.
Currently there is only one storage mechanism (via the ._parameters
        array) but other mechanisms may be desirable, in which case really the
model class itself should dictate this and *not* `Parameter` itself.
"""
def _update_parameter_value(model, name, value):
# TODO: Maybe handle exception on invalid input shape
param_metrics = model._param_metrics[name]
param_slice = param_metrics['slice']
param_shape = param_metrics['shape']
param_size = np.prod(param_shape)
if np.size(value) != param_size:
raise InputParameterError(
"Input value for parameter {0!r} does not have {1} elements "
"as the current value does".format(name, param_size))
model._parameters[param_slice] = np.array(value).ravel()
_update_parameter_value(model, self._name, value)
if hasattr(model, "_param_map"):
submodel_ind, param_name = model._param_map[self._name]
if hasattr(model._submodels[submodel_ind], "_param_metrics"):
_update_parameter_value(model._submodels[submodel_ind], param_name, value)
@staticmethod
def _create_value_wrapper(wrapper, model):
"""Wraps a getter/setter function to support optionally passing in
a reference to the model object as the second argument.
If a model is tied to this parameter and its getter/setter supports
a second argument then this creates a partial function using the model
instance as the second argument.
"""
if isinstance(wrapper, np.ufunc):
if wrapper.nin != 1:
raise TypeError("A numpy.ufunc used for Parameter "
"getter/setter may only take one input "
"argument")
elif wrapper is None:
# Just allow non-wrappers to fall through silently, for convenience
return None
else:
inputs, params = get_inputs_and_params(wrapper)
nargs = len(inputs)
if nargs == 1:
pass
elif nargs == 2:
if model is not None:
# Don't make a partial function unless we're tied to a
# specific model instance
model_arg = inputs[1].name
wrapper = functools.partial(wrapper, **{model_arg: model})
else:
raise TypeError("Parameter getter/setter must be a function "
"of either one or two arguments")
return wrapper
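    # Getter/setter sketch (illustrative): a one-argument pair converting
    # between internal radians and user-facing degrees::
    #
    #     >>> import numpy as np
    #     >>> def getter(value):
    #     ...     return np.rad2deg(value)
    #     >>> def setter(value):
    #     ...     return np.deg2rad(value)
    #     >>> p = Parameter('angle', default=0.0, getter=getter, setter=setter)
    #
    # A two-argument getter/setter would additionally receive the bound model
    # instance once the parameter is bound.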
def __array__(self, dtype=None):
# Make np.asarray(self) work a little more straightforwardly
arr = np.asarray(self.value, dtype=dtype)
if self.unit is not None:
arr = Quantity(arr, self.unit, copy=False)
return arr
def __bool__(self):
if self._model is None:
return True
else:
return bool(self.value)
__add__ = _binary_arithmetic_operation(operator.add)
__radd__ = _binary_arithmetic_operation(operator.add, reflected=True)
__sub__ = _binary_arithmetic_operation(operator.sub)
__rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True)
__mul__ = _binary_arithmetic_operation(operator.mul)
__rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True)
__pow__ = _binary_arithmetic_operation(operator.pow)
__rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True)
__div__ = _binary_arithmetic_operation(operator.truediv)
__rdiv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__truediv__ = _binary_arithmetic_operation(operator.truediv)
__rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__eq__ = _binary_comparison_operation(operator.eq)
__ne__ = _binary_comparison_operation(operator.ne)
__lt__ = _binary_comparison_operation(operator.lt)
__gt__ = _binary_comparison_operation(operator.gt)
__le__ = _binary_comparison_operation(operator.le)
__ge__ = _binary_comparison_operation(operator.ge)
__neg__ = _unary_arithmetic_operation(operator.neg)
__abs__ = _unary_arithmetic_operation(operator.abs)
def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = '{0} {1!s}'.format(out, param.unit)
return out
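# Usage sketch (illustrative)::
#
#     >>> from astropy import units as u
#     >>> from astropy.modeling.models import Gaussian1D
#     >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=0 * u.um, stddev=1 * u.um)
#     >>> param_repr_oneline(g.mean)    # doctest: +SKIP
#     '0.0 um'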
4e5be679b6ccd91a0051256b23274511ef563aa1e90cea900e62782cd007f438
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
import abc
import copy
import copyreg
import inspect
import functools
import operator
import types
import warnings
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
from itertools import chain, islice
import numpy as np
from astropy.utils import indent, metadata
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (sharedmethod, find_current_module,
InheritDocstrings, OrderedDescriptorContainer,
check_broadcast, IncompatibleShapeError, isiterable)
from astropy.utils.codegen import make_function_with_signature
from astropy.utils.exceptions import AstropyDeprecationWarning
from .utils import (combine_labels, make_binary_operator_eval,
ExpressionTree, AliasDict, get_inputs_and_params,
_BoundingBox, _combine_equivalency_dict)
from astropy.nddata.utils import add_array, extract_array
from .parameters import Parameter, InputParameterError, param_repr_oneline
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
'custom_model', 'ModelDefinitionError']
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions"""
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
Any additional keyword arguments passed in are passed to
`_CompoundModelMeta._from_operator`.
"""
# Note: Originally this used functools.partial, but that won't work when
# used in the class definition of _CompoundModelMeta since
# _CompoundModelMeta has not been defined yet.
def _opfunc(left, right):
# Deprecation is for https://github.com/astropy/astropy/issues/8234
if not (isinstance(left, Model) and isinstance(right, Model)):
warnings.warn(
                'Composition of model classes will be removed in 4.0 '
                '(but composition of model instances is not affected)',
AstropyDeprecationWarning)
# Perform an arithmetic operation on two models.
return _CompoundModelMeta._from_operator(oper, left, right, **kwargs)
return _opfunc
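# Compound-model sketch (illustrative; uses real astropy models): the
# operator functions returned above are what make expressions such as the
# following work::
#
#     >>> from astropy.modeling.models import Gaussian1D, Const1D
#     >>> m = Gaussian1D(1, 0, 1) + Const1D(2)   # arithmetic composition
#     >>> m(0)                                   # Gaussian peak (1) + 2
#     3.0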
class _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
_parameters_ = OrderedDict()
def __new__(mcls, name, bases, members):
# See the docstring for _is_dynamic above
if '_is_dynamic' not in members:
members['_is_dynamic'] = mcls._is_dynamic
return super().__new__(mcls, name, bases, members)
def __init__(cls, name, bases, members):
# Make sure OrderedDescriptorContainer gets to run before doing
# anything else
super().__init__(name, bases, members)
if cls._parameters_:
if hasattr(cls, '_param_names'):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(cls._parameters_)
else:
cls.param_names = tuple(cls._parameters_)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
cls._handle_special_methods(members)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
else:
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith('_abc_'):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ('__init__', '__call__'):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def n_inputs(cls):
return len(cls.inputs)
@property
def n_outputs(cls):
return len(cls.outputs)
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
        internal implementation detail (its name begins with '_').
"""
return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
def rename(cls, name):
"""
Creates a copy of this model class with a new name.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class '__main__.SkyRotation'>
Name: SkyRotation (Rotation2D)
Inputs: ('x', 'y')
Outputs: ('x', 'y')
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
new_cls = type(name, (cls,), {})
new_cls.__module__ = modname
if hasattr(cls, '__qualname__'):
if new_cls.__module__ == '__main__':
# __main__ is not added to a class's qualified name
new_cls.__qualname__ = name
else:
new_cls.__qualname__ = '{0}.{1}'.format(modname, name)
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get('inverse')
if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the code below from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get('bounding_box')
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = _BoundingBox.validate(cls, bounding_box)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
# TODO: Maybe warn in the above case?
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = \
cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of _BoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
'The bounding_box method for {0} is not correctly '
'defined: If defined as a method all arguments to that '
'method (besides self) must be keyword arguments with '
'default values that can be used to compute a default '
'bounding box.'.format(cls.name))
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),
{'__call__': __call__})
def _handle_special_methods(cls, members):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, '__qualname__'):
wrapper.__qualname__ = '{0}.{1}'.format(
cls.__qualname__, wrapper.__name__)
if ('__call__' not in members and 'inputs' in members and
isinstance(members['inputs'], tuple)):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
inputs = members['inputs']
args = ('self',) + inputs
new_call = make_function_with_signature(
__call__, args, [('model_set_axis', None),
('with_bounding_box', False),
('fill_value', np.nan),
('equivalencies', None)])
# The following makes it look like __call__ was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if ('__init__' not in members and not inspect.isabstract(cls) and
cls._parameters_):
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional arguments
if all(p.default is not None for p in cls._parameters_.values()):
args = ('self',)
kwargs = []
for param_name in cls.param_names:
default = cls._parameters_[param_name].default
unit = cls._parameters_[param_name].unit
# If the unit was specified in the parameter but the default
# is not a Quantity, attach the unit to the default.
if unit is not None:
default = Quantity(default, unit, copy=False)
kwargs.append((param_name, default))
else:
args = ('self',) + cls.param_names
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs='kwargs')
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif (inspect.isabstract(base) or
base.__name__.startswith('_')):
break
bases.append(base.name)
if bases:
return '{0} ({1})'.format(cls.name, ' -> '.join(bases))
else:
return cls.name
try:
default_keywords = [
('Name', format_inheritance(cls)),
('Inputs', cls.inputs),
('Outputs', cls.outputs),
]
if cls.param_names:
default_keywords.append(('Fittable parameters',
cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append('{0}: {1}'.format(keyword, value))
return '\n'.join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
This class sets the constraints and other properties for all individual
parameters and performs parameter validation.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
        sets. Also, ``model_set_axis=False`` can be used to indicate that a
        given input should be used to evaluate all the models in the model
        set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
        List of functions of length n such that ``ineqcons[j](x0, *args) >=
        0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
inputs = ()
"""The name(s) of the input variable(s) on which a model is evaluated."""
outputs = ()
"""The name(s) of the output(s) of the model."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
if meta is not None:
self.meta = meta
self._name = name
self._initialize_constraints(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_unit_support()
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
        mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {key: self._input_units_strict for
key in self.__class__.inputs}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless
for key in self.__class__.inputs}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.__class__.inputs}
else:
return val
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.__class__.inputs}
else:
return val
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
parameters = self._param_sets(raw=True, units=True)
with_bbox = kwargs.pop('with_bounding_box', False)
fill_value = kwargs.pop('fill_value', np.nan)
bbox = None
if with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if self.n_inputs > 1 and bbox is not None:
# bounding_box is in python order - convert it to the order of the inputs
bbox = bbox[::-1]
if bbox is None:
outputs = self.evaluate(*chain(inputs, parameters))
else:
if self.n_inputs == 1:
bbox = [bbox]
# indices where input is outside the bbox
# have a value of 1 in ``nan_ind``
nan_ind = np.zeros(inputs[0].shape, dtype=bool)
for ind, inp in enumerate(inputs):
# Pass an ``out`` array so that ``axis_ind`` is array for scalars as well.
axis_ind = np.zeros(inp.shape, dtype=bool)
axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)
nan_ind[axis_ind] = 1
# get an array with indices of valid inputs
valid_ind = np.logical_not(nan_ind).nonzero()
# inputs holds only inputs within the bbox
args = []
                for inp in inputs:
                    if not inp.shape:
                        # shape is ()
                        if nan_ind:
                            outputs = [fill_value for a in args]
                        else:
                            args.append(inp)
                    else:
                        args.append(inp[valid_ind])
valid_result = self.evaluate(*chain(args, parameters))
if self.n_outputs == 1:
valid_result = [valid_result]
# combine the valid results with the ``fill_value`` values
# outside the bbox
result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]
for ind, r in enumerate(valid_result):
if not result[ind].shape:
# shape is ()
result[ind] = r
else:
result[ind][valid_ind] = r
# format output
if self.n_outputs == 1:
outputs = np.asarray(result[0])
else:
outputs = [np.asarray(r) for r in result]
else:
outputs = self.evaluate(*chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(format_info, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
else:
return outputs
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def n_inputs(self):
"""
The number of inputs to this model.
Equivalent to ``len(model.inputs)``.
"""
return len(self.inputs)
@property
def n_outputs(self):
"""
The number of outputs from this model.
Equivalent to ``len(model.outputs)``.
"""
return len(self.outputs)
@property
def model_set_axis(self):
"""
The index of the model set axis--that is the axis of a parameter array
that pertains to which model a parameter value pertains to--as
specified when the model was initialized.
See the documentation on `Model Sets
<http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
This is a list with one item per parameter set, which is an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
"parameters array: {0}".format(e))
@property
def fixed(self):
"""
A `dict` mapping parameter names to their fixed constraint.
"""
return self._constraints['fixed']
@property
def tied(self):
"""
A `dict` mapping parameter names to their tied constraint.
"""
return self._constraints['tied']
@property
def bounds(self):
"""
A `dict` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
return self._constraints['bounds']
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._constraints['eqcons']
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._constraints['ineqcons']
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
        set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
return self._inverse()
raise NotImplementedError("An analytical inverse transform has not "
"been implemented for this model.")
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse.")
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
del self._user_inverse
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
`None` for no bounding box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`bounding-boxes`
The limits are ordered according to the `numpy` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "astropy\modeling\core.py", line 980, in bounding_box
"No bounding box is defined for this model (note: the "
NotImplementedError: No bounding box is defined for this model (note:
the bounding box was explicitly disabled for this model; use `del
model.bounding_box` to restore the default bounding box, if one is
defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model).")
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError(
"No bounding box is defined for this model.")
elif isinstance(self._bounding_box, _BoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return self._bounding_box()
else:
# The only other allowed possibility is that it's a _BoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), _model=self)()
return self._bounding_box(bounding_box, _model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif (isinstance(self._bounding_box, type) and
issubclass(self._bounding_box, _BoundingBox)):
cls = self._bounding_box
else:
cls = _BoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def separable(self):
""" A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
else:
raise NotImplementedError(
'The "separable" property is not defined for '
'model {}'.format(self.__class__.__name__))
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
        Return an instance of the model in which the parameter values have
        been converted to the units appropriate for the data, and the units
        then stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not necessarily
the units of the input data, but are derived from them. Model subclasses
that want fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
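        Examples
        --------
        A minimal sketch (illustrative only; the keyword names must match the
        model's input and output labels, here ``x`` and ``y``):
        >>> from astropy import units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(1 * u.Jy, 3 * u.m, 2 * u.m)
        >>> g_unitless = g.without_units_for_data(x=[1, 2] * u.m,
        ...                                       y=[3, 4] * u.Jy)  # doctest: +SKIP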
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, _CompoundModel):
model.strip_units_from_tree()
return model
def strip_units_from_tree(self):
for item in self._tree.traverse_inorder():
if isinstance(item.value, Model):
for parname in item.value.param_names:
par = getattr(item.value, parname)
par._set_unit(None, force=True)
setattr(item.value, parname, par)
def with_units_from_data(self, **kwargs):
"""
        Return an instance of the model in which the parameters have been
        given units compatible with the specified data units.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units of
the input data, but are derived from them. Model subclasses that want
fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
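        Examples
        --------
        A minimal sketch (illustrative only; typically used to restore units
        after fitting a unit-stripped model):
        >>> from astropy import units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(1., 3., 2.)
        >>> g_q = g.with_units_from_data(x=[1, 2] * u.m,
        ...                              y=[3, 4] * u.Jy)  # doctest: +SKIP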
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly, hence
# the call to _set_unit.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
else:
return False
@property
def _supports_unit_fitting(self):
# If the model has a '_parameter_units_for_data_units' method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
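        A minimal illustration (using
        `~astropy.modeling.functional_models.Shift`, whose implicit term is
        the fixed ``b*x`` with ``b = 1``):
        >>> from astropy.modeling.models import Shift
        >>> Shift(2.).sum_of_implicit_terms(3.)  # doctest: +SKIP
        3.0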
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of the
returned array. If this is not provided (or None), the model will be
evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be passed.
Raises
------
ValueError
            If ``coords`` is not given and the `Model.bounding_box` of this
model is not set.
Examples
--------
:ref:`bounding-boxes`
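        A minimal sketch (assuming a 1D Gaussian with a user-set bounding
        box; the model is added only within the box):
        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1., mean=5., stddev=1.)
        >>> g.bounding_box = (3, 7)
        >>> canvas = np.zeros(10)
        >>> canvas = g.render(canvas)  # doctest: +SKIP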
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out, dtype=float)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
            # ensures the position is at the center pixel, which matters when
            # using add_array
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
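        A minimal sketch of the annotation mechanism (hypothetical model, not
        part of astropy):
        >>> from astropy import units as u
        >>> from astropy.modeling import Model
        >>> class ScaleMeters(Model):
        ...     inputs = ('x',)
        ...     outputs = ('y',)
        ...     @staticmethod
        ...     def evaluate(x: u.m):
        ...         return 2 * x
        >>> ScaleMeters().input_units  # doctest: +SKIP
        {'x': Unit("m")}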
"""
if hasattr(self, '_input_units'):
return self._input_units
elif hasattr(self.evaluate, '__annotations__'):
annotations = self.evaluate.__annotations__.copy()
annotations.pop('return', None)
if annotations:
                # If any input lacks an annotation this will raise a KeyError.
return dict((name, annotations[name]) for name in self.inputs)
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the output
of evaluate should be in, and returns a dictionary mapping outputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, '_return_units'):
return self._return_units
elif hasattr(self.evaluate, '__annotations__'):
return self.evaluate.__annotations__.get('return', None)
else:
# None means any unit is accepted
return None
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
**kwargs):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
        there is more than one parameter set. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
n_models = len(self)
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
_validate_input_shapes(inputs, self.inputs, n_models,
model_set_axis, self.standard_broadcasting)
inputs = self._validate_input_units(inputs, equivalencies)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if n_models == 1:
return _prepare_inputs_single_model(self, params, inputs,
**kwargs)
else:
return _prepare_inputs_model_set(self, params, inputs, n_models,
model_set_axis, **kwargs)
def _validate_input_units(self, inputs, equivalencies=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(self.inputs,
equivalencies,
self.input_units_equivalencies)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is because
# some equivalencies are non-linear, and we need to be
# sure that we evaluate the model in its own frame
# of reference. If input_units_strict is set, we also
# need to convert to the input units.
if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]:
inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required dimensionless "
"input".format(name,
self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type))
else:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required input units of "
"{4} ({5})".format(name, self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type,
input_unit,
input_unit.physical_type))
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (not self.input_units_allow_dimensionless[input_name] and
input_unit is not dimensionless_unscaled and input_unit is not None):
if np.any(inputs[i] != 0):
raise UnitsError("{0}: Units of input '{1}', (dimensionless), could not be "
"converted to required input units of "
"{2} ({3})".format(name, self.inputs[i], input_unit,
input_unit.physical_type))
return inputs
def _process_output_units(self, inputs, outputs):
        inputs_are_quantity = any(isinstance(i, Quantity) for i in inputs)
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)])
return outputs
def prepare_outputs(self, format_info, *outputs, **kwargs):
model_set_axis = kwargs.get('model_set_axis', None)
if len(self) == 1:
return _prepare_outputs_single_model(self, outputs, format_info)
else:
return _prepare_outputs_model_set(self, outputs, format_info, model_set_axis)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return copy.deepcopy(self)
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
@sharedmethod
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
# *** Internal methods ***
@sharedmethod
def _from_existing(self, existing, param_names):
"""
Creates a new instance of ``cls`` that shares its underlying parameter
values with an existing model instance given by ``existing``.
This is used primarily by compound models to return a view of an
individual component of a compound model. ``param_names`` should be
the names of the parameters in the *existing* model to use as the
parameters in this new model. Its length should equal the number of
parameters this model takes, so that it can map parameters on the
existing model to parameters on this model one-to-one.
"""
# Basically this is an alternative __init__
if isinstance(self, type):
# self is a class, not an instance
needs_initialization = True
dummy_args = (0,) * len(param_names)
self = self.__new__(self, *dummy_args)
else:
needs_initialization = False
self = self.copy()
aliases = dict(zip(self.param_names, param_names))
# This is basically an alternative _initialize_constraints
constraints = {}
for cons_type in self.parameter_constraints:
orig = existing._constraints[cons_type]
constraints[cons_type] = AliasDict(orig, aliases)
self._constraints = constraints
self._n_models = existing._n_models
self._model_set_axis = existing._model_set_axis
self._parameters = existing._parameters
self._param_metrics = defaultdict(dict)
for param_a, param_b in aliases.items():
            # Take the param metrics info for the given parameters in the
# existing model, and hand them to the appropriate parameters in
# the new model
self._param_metrics[param_a] = existing._param_metrics[param_b]
if needs_initialization:
self.__init__(*dummy_args)
return self
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
if hasattr(self, '_constraints'):
# Skip constraint initialization if it has already been handled via
# an alternate initialization
return
self._constraints = {}
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
self._constraints[constraint] = values.copy()
# Update with default parameter constraints
for param_name in self.param_names:
param = getattr(self, param_name)
# Parameters don't have all constraint types
value = getattr(param, constraint)
if value is not None:
self._constraints[constraint][param_name] = value
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._constraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
if hasattr(self, '_parameters'):
# Skip parameter initialization if it has already been handled via
# an alternate initialization
return
n_models = kwargs.pop('n_models', None)
if not (n_models is None or
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
"(got {0!r})".format(n_models))
model_set_axis = kwargs.pop('model_set_axis', None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (model_set_axis is False or
(isinstance(model_set_axis, int) and
not isinstance(model_set_axis, bool))):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
"model in a set of models (got {0!r}).".format(
model_set_axis))
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = {}
if len(args) > len(self.param_names):
raise TypeError(
"{0}.__init__() takes at most {1} positional arguments ({2} "
"given)".format(self.__class__.__name__, len(self.param_names),
len(args)))
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=float)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
"{0}.__init__() got multiple values for parameter "
"{1!r}".format(self.__class__.__name__, param_name))
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[param_name] = quantity_asanyarray(value, dtype=float)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
'{0}.__init__() got an unrecognized parameter '
'{1!r}'.format(self.__class__.__name__, kwarg))
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name, value in params.items():
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension "
"at least {0} for model_set_axis={1} (the value "
"given for {2!r} is only {3}-dimensional)".format(
min_ndim, model_set_axis, name, param_ndim))
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
"Inconsistent dimensions for parameter {0!r} for "
"{1} model sets. The length of axis {2} must be the "
"same for all input parameter values".format(
name, n_models, model_set_axis))
self._check_param_broadcast(params, max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(params, None)
self._n_models = n_models
self._initialize_parameter_values(params)
def _initialize_parameter_values(self, params):
# self._param_metrics should have been initialized in
# self._initialize_parameters
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
unit = None
param_descr = getattr(self, name)
if params.get(name) is None:
default = param_descr.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
"{0}.__init__() requires a value for parameter "
"{1!r}".format(self.__class__.__name__, name))
value = params[name] = default
unit = param_descr.unit
else:
value = params[name]
if isinstance(value, Quantity):
unit = value.unit
else:
unit = None
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
if unit is None and param_descr.unit is not None:
raise InputParameterError(
"{0}.__init__() requires a Quantity for parameter "
"{1!r}".format(self.__class__.__name__, name))
param_metrics[name]['orig_unit'] = unit
param_metrics[name]['raw_unit'] = None
if param_descr._setter is not None:
_val = param_descr._setter(value)
if isinstance(_val, Quantity):
param_metrics[name]['raw_unit'] = _val.unit
else:
param_metrics[name]['raw_unit'] = None
total_size += param_size
self._param_metrics = param_metrics
self._parameters = np.empty(total_size, dtype=np.float64)
# Now set the parameter values (this will also fill
# self._parameters)
# TODO: This is a bit ugly, but easier to deal with than how this was
# done previously. There's still lots of opportunity for refactoring
# though, in particular once we move the _get/set_model_value methods
# out of Parameter and into Model (renaming them
# _get/set_parameter_value)
for name, value in params.items():
# value here may be a Quantity object.
param_descr = getattr(self, name)
unit = param_descr.unit
value = np.array(value)
orig_unit = param_metrics[name]['orig_unit']
if param_descr._setter is not None:
if unit is not None:
value = np.asarray(param_descr._setter(value * orig_unit).value)
else:
value = param_descr._setter(value)
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
for name in params:
param_descr = getattr(self, name)
param_descr.validator(param_descr.value)
def _check_param_broadcast(self, params, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
param_names = []
model_set_axis = self._model_set_axis
for name in self.param_names:
# Previously this just used iteritems(params), but we loop over all
# param_names instead just to ensure some determinism in the
# ordering behavior
if name not in params:
continue
value = params[name]
param_names.append(name)
# We've already checked that each parameter array is compatible in
# the model_set_axis dimension, but now we need to check the
# dimensions excluding that axis
# Split the array dimensions into the axes before model_set_axis
# and after model_set_axis
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (param_shape[:model_set_axis + 1] +
new_axes +
param_shape[model_set_axis + 1:])
self._param_metrics[name]['broadcast_shape'] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = param_names[shape_a_idx]
param_b = param_names[shape_b_idx]
raise InputParameterError(
"Parameter {0!r} of shape {1!r} cannot be broadcast with "
"parameter {2!r} of shape {3!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules.".format(param_a, shape_a,
param_b, shape_b))
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
        are actually stored in the ``._parameters`` array), as opposed to the
        values displayed to users. In most cases these are one and the same,
        but there are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
param_metrics = self._param_metrics
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw:
value = param._raw_value
else:
value = param.value
broadcast_shape = param_metrics[name].get('broadcast_shape')
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and self._param_metrics[name]['raw_unit'] is not None:
unit = self._param_metrics[name]['raw_unit']
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
# TODO: Returning an array from this method may be entirely pointless
# for internal use--perhaps only the external param_sets method should
# return an array (and just for backwards compat--I would prefer to
# maybe deprecate that method)
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# TODO: I think this could be reworked to preset model sets better
parts = [repr(a) for a in args]
parts.extend(
"{0}={1}".format(name,
param_repr_oneline(getattr(self, name)))
for name in self.param_names)
if self.name is not None:
parts.append('name={0!r}'.format(self.name))
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] != value:
continue
parts.append('{0}={1!r}'.format(kwarg, value))
if len(self) > 1:
parts.append("n_models={0}".format(len(self)))
return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))
def _format_str(self, keywords=[]):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords + keywords
if value is not None]
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
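# A minimal sketch of subclassing Fittable1DModel (illustrative only, not
# part of this module):
#
#     from astropy.modeling import Fittable1DModel, Parameter
#
#     class Line(Fittable1DModel):
#         slope = Parameter(default=1)
#         intercept = Parameter(default=0)
#
#         @staticmethod
#         def evaluate(x, slope, intercept):
#             return slope * x + intercept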
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x', 'y')
outputs = ('z',)
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
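# In all three helpers above, each operand is a 3-tuple
# ``(evaluate_callable, n_inputs, n_outputs)``. For example, joining a
# 1-input/1-output f with a 2-input/1-output g via '&' yields a
# 3-input/2-output callable.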
# TODO: Support a couple unary operators--at least negation?
BINARY_OPERATORS = {
'+': _make_arithmetic_operator(operator.add),
'-': _make_arithmetic_operator(operator.sub),
'*': _make_arithmetic_operator(operator.mul),
'/': _make_arithmetic_operator(operator.truediv),
'**': _make_arithmetic_operator(operator.pow),
'|': _composition_operator,
'&': _join_operator
}
_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
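# OPERATOR_PRECEDENCE is used when formatting a compound model's expression
# string to decide where parentheses are required; a lower index means lower
# precedence, so '|' binds loosest and '**' binds tightest.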
class _CompoundModelMeta(_ModelMeta):
_tree = None
_submodels = None
_submodel_names = None
_nextid = 0
_param_names = None
# _param_map is a mapping of the compound model's generated param names to
# the parameters of submodels they are associated with. The values in this
# mapping are (idx, name) tuples were idx is the index of the submodel this
# parameter is associated with, and name is the same parameter's name on
# the submodel
# In principle this will allow compound models to give entirely new names
# to parameters that don't have to be the same as their original names on
# the submodels, but right now that isn't taken advantage of
_param_map = None
_slice_offset = 0
# When taking slices of a compound model, this keeps track of how offset
# the first model in the slice is from the first model in the original
# compound model it was taken from
# This just inverts _param_map, swapping keys with values. This is also
# useful to have.
_param_map_inverse = None
_fittable = None
_evaluate = None
def __getitem__(cls, index):
index = cls._normalize_index(index)
if isinstance(index, (int, np.integer)):
return cls._get_submodels()[index]
else:
return cls._get_slice(index.start, index.stop)
def __getattr__(cls, attr):
# Make sure the _tree attribute is set; otherwise we are not looking up
# an attribute on a concrete compound model class and should just raise
# the AttributeError
if cls._tree is not None and attr in cls.param_names:
cls._init_param_descriptors()
return getattr(cls, attr)
raise AttributeError(attr)
def __repr__(cls):
if cls._tree is None:
# This case is mostly for debugging purposes
return cls._format_cls_repr()
expression = cls._format_expression()
components = cls._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return cls._format_cls_repr(keywords=keywords)
def __dir__(cls):
"""
Returns a list of attributes defined on a compound model, including
all of its parameters.
"""
basedir = super().__dir__()
if cls._tree is not None:
for name in cls.param_names:
basedir.append(name)
basedir.sort()
return basedir
def __reduce__(cls):
rv = super().__reduce__()
if isinstance(rv, tuple):
# Delete _evaluate from the members dict
with suppress(KeyError):
del rv[1][2]['_evaluate']
return rv
@property
def submodel_names(cls):
if cls._submodel_names is None:
seen = {}
names = []
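            # Disambiguate duplicate names by appending the submodel index;
            # e.g. two submodels both named 'Gaussian1D' come out as
            # 'Gaussian1D_0' and 'Gaussian1D_1'.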
for idx, submodel in enumerate(cls._get_submodels()):
name = str(submodel.name)
if name in seen:
names.append('{0}_{1}'.format(name, idx))
if seen[name] >= 0:
jdx = seen[name]
names[jdx] = '{0}_{1}'.format(names[jdx], jdx)
seen[name] = -1
else:
names.append(name)
seen[name] = idx
cls._submodel_names = tuple(names)
return cls._submodel_names
@property
def param_names(cls):
if cls._param_names is None:
cls._init_param_names()
return cls._param_names
@property
def fittable(cls):
if cls._fittable is None:
cls._fittable = all(m.fittable for m in cls._get_submodels())
return cls._fittable
# TODO: Maybe we could use make_function_with_signature for evaluate, but
# it's probably not worth it (and I'm not sure what the limit is on number
# of function arguments/local variables but we could break that limit for
    # complicated compound models...)
def evaluate(cls, *args):
if cls._evaluate is None:
func = cls._tree.evaluate(BINARY_OPERATORS,
getter=cls._model_evaluate_getter)[0]
cls._evaluate = func
inputs = args[:cls.n_inputs]
params = iter(args[cls.n_inputs:])
result = cls._evaluate(inputs, params)
if cls.n_outputs == 1:
return result[0]
else:
return result
# TODO: This supports creating a new compound model from two existing
# compound models (or normal models) and a single operator. However, it
# ought also to be possible to create a new model from an *entire*
# expression, represented as a sequence of operators and their operands (or
    # an existing ExpressionTree) and build that into a compound model without
# creating an intermediate _CompoundModel class for every single operator
# in the expression. This will prove to be a useful optimization in many
# cases
@classmethod
def _from_operator(mcls, operator, left, right, additional_members={}):
"""
Given a Python operator (represented by a string, such as ``'+'``
or ``'*'``, and two model classes or instances, return a new compound
model that evaluates the given operator on the outputs of the left and
right input models.
If either of the input models are a model *class* (i.e. a subclass of
`~astropy.modeling.Model`) then the returned model is a new subclass of
`~astropy.modeling.Model` that may be instantiated with any parameter
values. If both input models are *instances* of a model, a new class
is still created, but this method returns an *instance* of that class,
taking the parameter values from the parameters of the input model
instances.
If given, the ``additional_members`` `dict` may provide additional
class members that should be added to the generated
`~astropy.modeling.Model` subclass. Some members that are generated by
this method should not be provided by ``additional_members``. These
include ``_tree``, ``inputs``, ``outputs``, ``linear``,
        ``standard_broadcasting``, and ``__module__``. This is currently for
internal use only.
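        For example (illustrative): ``Gaussian1D + Const1D`` (two classes)
        produces a new compound model *class*, whereas
        ``Gaussian1D() + Const1D()`` (two instances) produces an *instance*
        of such a class.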
"""
# Note, currently this only supports binary operators, but could be
# easily extended to support unary operators (namely '-') if/when
# needed
children = []
for child in (left, right):
if isinstance(child, (_CompoundModelMeta, _CompoundModel)):
"""
Although the original child models were copied we make another
copy here to ensure that changes in this child compound model
parameters will not propagate to the reuslt, that is
cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()
cm2 = cm1 | Scale()
cm1.amplitude_0 = 100
assert(cm2.amplitude_0 == 1)
"""
children.append(copy.deepcopy(child._tree))
elif isinstance(child, Model):
children.append(ExpressionTree(child.copy(),
inputs=child.inputs,
outputs=child.outputs))
else:
children.append(ExpressionTree(child, inputs=child.inputs, outputs=child.outputs))
inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)
tree = ExpressionTree(operator, left=children[0], right=children[1],
inputs=inputs, outputs=outputs)
        name = 'CompoundModel{0}'.format(_CompoundModelMeta._nextid)
_CompoundModelMeta._nextid += 1
mod = find_current_module(3)
if mod:
modname = mod.__name__
else:
modname = '__main__'
if operator in ('|', '+', '-'):
linear = left.linear and right.linear
else:
# Which is not to say it is *definitely* not linear but it would be
# trickier to determine
linear = False
standard_broadcasting = left.standard_broadcasting and right.standard_broadcasting
# Note: If any other members are added here, make sure to mention them
# in the docstring of this method.
members = additional_members
members.update({
'_tree': tree,
'_is_dynamic': True, # See docs for _ModelMeta._is_dynamic
'inputs': inputs,
'outputs': outputs,
'linear': linear,
'standard_broadcasting': standard_broadcasting,
'__module__': str(modname)})
new_cls = mcls(name, (_CompoundModel,), members)
if isinstance(left, Model) and isinstance(right, Model):
# Both models used in the operator were already instantiated models,
# not model *classes*. As such it's not particularly useful to return
# the class itself, but to instead produce a new instance:
instance = new_cls()
# Workaround for https://github.com/astropy/astropy/issues/3542
# TODO: Any effort to restructure the tree-like data structure for
# compound models should try to obviate this workaround--if
# intermediate compound models are stored in the tree as well then
# we can immediately check for custom inverses on sub-models when
# computing the inverse
instance._user_inverse = mcls._make_user_inverse(
operator, left, right)
if left._n_models == right._n_models:
instance._n_models = left._n_models
else:
raise ValueError('Model sets must have the same number of '
'components.')
return instance
# Otherwise return the new uninstantiated class itself
return new_cls
@classmethod
def _check_inputs_and_outputs(mcls, operator, left, right):
# TODO: These aren't the full rules for handling inputs and outputs, but
# this will handle most basic cases correctly
if operator == '|':
inputs = left.inputs
outputs = right.outputs
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |: {0} (n_inputs={1}, "
"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); "
"n_outputs for the left-hand model must match n_inputs "
"for the right-hand model.".format(
left.name, left.n_inputs, left.n_outputs, right.name,
right.n_inputs, right.n_outputs))
elif operator == '&':
inputs = combine_labels(left.inputs, right.inputs)
outputs = combine_labels(left.outputs, right.outputs)
else:
# Without loss of generality
inputs = left.inputs
outputs = left.outputs
if (left.n_inputs != right.n_inputs or
left.n_outputs != right.n_outputs):
raise ModelDefinitionError(
"Unsupported operands for {0}: {1} (n_inputs={2}, "
"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator".format(
operator, left.name, left.n_inputs, left.n_outputs,
right.name, right.n_inputs, right.n_outputs))
return inputs, outputs
@classmethod
def _make_user_inverse(mcls, operator, left, right):
"""
Generates an inverse `Model` for this `_CompoundModel` when either
model in the operation has a *custom inverse* that was manually
assigned by the user.
If either model has a custom inverse, and in particular if another
`_CompoundModel` has a custom inverse, then none of that model's
sub-models should be considered at all when computing the inverse.
So in that case we just compute the inverse ahead of time and set
it as the new compound model's custom inverse.
Note, this use case only applies when combining model instances,
since model classes don't currently have a notion of a "custom
inverse" (though it could probably be supported by overriding the
class's inverse property).
TODO: Consider fixing things so the aforementioned class-based case
works as well. However, for the present purposes this is good enough.
"""
if not (operator in ('&', '|') and
(left._user_inverse or right._user_inverse)):
# These are the only operators that support an inverse right now
return None
try:
left_inv = left.inverse
right_inv = right.inverse
except NotImplementedError:
# If either inverse is undefined then just return False; this
# means the normal _CompoundModel.inverse routine will fail
# naturally anyways, since it requires all sub-models to have
# an inverse defined
return None
if operator == '&':
return left_inv & right_inv
else:
return right_inv | left_inv
# TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of
# leaf nodes is something the ExpressionTree class itself could just know
def _get_submodels(cls):
# Would make this a lazyproperty but those don't currently work with
# type objects
if cls._submodels is not None:
return cls._submodels
submodels = [c.value for c in cls._tree.traverse_postorder()
if c.isleaf]
cls._submodels = submodels
return submodels
def _init_param_descriptors(cls):
"""
This routine sets up the names for all the parameters on a compound
model, including figuring out unique names for those parameters and
also mapping them back to their associated parameters of the underlying
submodels.
Setting this all up is costly, and only necessary for compound models
that a user will directly interact with. For example when building an
expression like::
>>> M = (Model1 + Model2) * Model3 # doctest: +SKIP
the user will generally never interact directly with the temporary
result of the subexpression ``(Model1 + Model2)``. So there's no need
to setup all the parameters for that temporary throwaway. Only once
the full expression is built and the user initializes or introspects
``M`` is it necessary to determine its full parameterization.
"""
# Accessing cls.param_names will implicitly call _init_param_names if
# needed and thus also set up the _param_map; I'm not crazy about that
# design but it stands for now
for param_name in cls.param_names:
submodel_idx, submodel_param = cls._param_map[param_name]
submodel = cls[submodel_idx]
orig_param = getattr(submodel, submodel_param, None)
if isinstance(submodel, Model):
# Take the parameter's default from the model's value for that
# parameter
default = orig_param.value
else:
default = orig_param.default
# Copy constraints
constraints = dict((key, getattr(orig_param, key))
for key in Model.parameter_constraints)
# Note: Parameter.copy() returns a new unbound Parameter, never
# a bound Parameter even if submodel is a Model instance (as
# opposed to a Model subclass)
new_param = orig_param.copy(name=param_name, default=default,
unit=orig_param.unit,
**constraints)
setattr(cls, param_name, new_param)
def _init_param_names(cls):
"""
This subroutine is solely for setting up the ``param_names`` attribute
itself.
See ``_init_param_descriptors`` for the full parameter setup.
"""
# Currently this skips over Model *instances* in the expression tree;
# basically these are treated as constants and do not add
# fittable/tunable parameters to the compound model.
# TODO: I'm not 100% happy with this design, and maybe we need some
# interface for distinguishing fittable/settable parameters with
# *constant* parameters (which would be distinct from parameters with
# fixed constraints since they're permanently locked in place). But I'm
# not sure if this is really the best way to treat the issue.
names = []
param_map = {}
# Start counting the suffix indices to put on parameter names from the
# slice_offset. Usually this will just be zero, but for compound
# models that were sliced from another compound model this may be > 0
param_suffix = cls._slice_offset
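        # The generated names are '<param_name>_<submodel index>'; e.g. a
        # compound of two Gaussian1D components gets ('amplitude_0', 'mean_0',
        # 'stddev_0', 'amplitude_1', 'mean_1', 'stddev_1').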
for idx, model in enumerate(cls._get_submodels()):
if not model.param_names:
# Skip models that don't have parameters in the numbering
# TODO: Reevaluate this if it turns out to be confusing, though
# parameter-less models are not very common in practice (there
# are a few projections that don't take parameters)
continue
for param_name in model.param_names:
# This is sort of heuristic, but we want to check that
# model.param_name *actually* returns a Parameter descriptor,
# and that the model isn't some inconsistent type that happens
# to have a param_names attribute but does not actually
# implement settable parameters.
# In the future we can probably remove this check, but this is
# here specifically to support the legacy compat
# _CompositeModel which can be considered a pathological case
# in the context of the new framework
# if not isinstance(getattr(model, param_name, None),
# Parameter):
# break
name = '{0}_{1}'.format(param_name, param_suffix + idx)
names.append(name)
param_map[name] = (idx, param_name)
cls._param_names = tuple(names)
cls._param_map = param_map
cls._param_map_inverse = dict((v, k) for k, v in param_map.items())
def _format_expression(cls):
# TODO: At some point might be useful to make a public version of this,
# albeit with more formatting options
return cls._tree.format_expression(OPERATOR_PRECEDENCE)
def _format_components(cls):
return '\n\n'.join('[{0}]: {1!r}'.format(idx, m)
for idx, m in enumerate(cls._get_submodels()))
def _normalize_index(cls, index):
"""
Converts an index given to __getitem__ to either an integer, or
a slice with integer start and stop values.
If the length of the slice is exactly 1 this converts the index to a
simple integer lookup.
Negative integers are converted to positive integers.
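        For example (illustrative): on a three-component model, ``-1``
        normalizes to ``2``, a submodel name normalizes to that submodel's
        integer position, and a length-1 slice collapses to a plain integer.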
"""
def get_index_from_name(name):
try:
return cls.submodel_names.index(name)
except ValueError:
raise IndexError(
'Compound model {0} does not have a component named '
'{1}'.format(cls.name, name))
def check_for_negative_index(index):
if index < 0:
new_index = len(cls.submodel_names) + index
if new_index < 0:
# If still < 0 then this is an invalid index
raise IndexError(
"Model index {0} out of range.".format(index))
else:
index = new_index
return index
if isinstance(index, str):
return get_index_from_name(index)
elif isinstance(index, slice):
if index.step not in (1, None):
# In principle it could be but I can scarcely imagine a case
# where it would be useful. If someone can think of one then
# we can enable it.
raise ValueError(
"Step not supported for compound model slicing.")
start = index.start if index.start is not None else 0
stop = (index.stop
if index.stop is not None else len(cls.submodel_names))
if isinstance(start, (int, np.integer)):
start = check_for_negative_index(start)
if isinstance(stop, (int, np.integer)):
stop = check_for_negative_index(stop)
if isinstance(start, str):
start = get_index_from_name(start)
if isinstance(stop, str):
stop = get_index_from_name(stop) + 1
length = stop - start
if length == 1:
return start
elif length <= 0:
raise ValueError("Empty slice of a compound model.")
return slice(start, stop)
elif isinstance(index, (int, np.integer)):
if index >= len(cls.submodel_names):
raise IndexError(
"Model index {0} out of range.".format(index))
return check_for_negative_index(index)
raise TypeError(
'Submodels can be indexed either by their integer order or '
'their name (got {0!r}).'.format(index))
def _get_slice(cls, start, stop):
"""
        Return a new model built from a sub-expression of the expression
represented by this model.
Right now this is highly inefficient, as it creates a new temporary
model for each operator that appears in the sub-expression. It would
be better if this just built a new expression tree, and the new model
instantiated directly from that tree.
Once tree -> model instantiation is possible this should be fixed to
use that instead.
"""
members = {'_slice_offset': cls._slice_offset + start}
operators = dict((oper, _model_oper(oper, additional_members=members))
for oper in BINARY_OPERATORS)
return cls._tree.evaluate(operators, start=start, stop=stop)
@staticmethod
def _model_evaluate_getter(idx, model):
n_params = len(model.param_names)
n_inputs = model.n_inputs
n_outputs = model.n_outputs
# If model is not an instance, we need to instantiate it to make sure
# that we can call _validate_input_units (since e.g. input_units can
# be an instance property).
def evaluate_wrapper(model, inputs, param_values):
inputs = model._validate_input_units(inputs)
outputs = model.evaluate(*inputs, *param_values)
if n_outputs == 1:
outputs = (outputs,)
return model._process_output_units(inputs, outputs)
if isinstance(model, Model):
def f(inputs, params):
param_values = tuple(islice(params, n_params))
return evaluate_wrapper(model, inputs, param_values)
else:
# Where previously model was a class, now make an instance
def f(inputs, params):
param_values = tuple(islice(params, n_params))
m = model(*param_values)
return evaluate_wrapper(m, inputs, param_values)
return (f, n_inputs, n_outputs)
class _CompoundModel(Model, metaclass=_CompoundModelMeta):
fit_deriv = None
col_fit_deriv = False
_submodels = None
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return super()._format_str(keywords=keywords)
def _generate_input_output_units_dict(self, mapping, attr):
"""
This method is used to transform dict or bool settings from
submodels into a single dictionary for the composite model,
taking into account renaming of input parameters.
"""
d = {}
for inp, (model, orig_inp) in mapping.items():
mattr = getattr(model, attr)
if isinstance(mattr, dict):
if orig_inp in mattr:
d[inp] = mattr[orig_inp]
elif isinstance(mattr, bool):
d[inp] = mattr
if d: # Note that if d is empty, we just return None
return d
@property
def input_units_allow_dimensionless(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_allow_dimensionless')
@property
def input_units_strict(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_strict')
@property
def input_units(self):
return self._generate_input_output_units_dict(self._tree.inputs_map, 'input_units')
@property
def input_units_equivalencies(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_equivalencies')
@property
def return_units(self):
return self._generate_input_output_units_dict(self._tree.outputs_map,
'return_units')
def __getattr__(self, attr):
# This __getattr__ is necessary, because _CompoundModelMeta creates
# Parameter descriptors *lazily*--they do not exist in the class
# __dict__ until one of them has been accessed.
# However, this is at odds with how Python looks up descriptors (see
# (https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)
# which is to look directly in the class __dict__
# This workaround allows descriptors to work correctly when they are
# not initially found in the class __dict__
value = getattr(self.__class__, attr)
if hasattr(value, '__get__'):
# Object is a descriptor, so we should really return the result of
# its __get__
value = value.__get__(self, self.__class__)
return value
def __getitem__(self, index):
index = self.__class__._normalize_index(index)
model = self.__class__[index]
if isinstance(index, slice):
param_names = model.param_names
else:
param_map = self.__class__._param_map_inverse
param_names = tuple(param_map[index, name]
for name in model.param_names)
return model._from_existing(self, param_names)
@property
def submodel_names(self):
return self.__class__.submodel_names
@sharedmethod
def n_submodels(self):
return len(self.submodel_names)
@property
def param_names(self):
return self.__class__.param_names
@property
def fittable(self):
return self.__class__.fittable
@sharedmethod
def evaluate(self, *args):
return self.__class__.evaluate(*args)
# TODO: The way this works is highly inefficient--the inverse is created by
# making a new model for each operator in the compound model, which could
# potentially mean creating a large number of temporary throwaway model
# classes. This can definitely be optimized in the future by implementing
# a way to construct a single model class from an existing tree
@property
def inverse(self):
def _not_implemented(oper):
def _raise(x, y):
raise NotImplementedError(
"The inverse is not currently defined for compound "
"models created using the {0} operator.".format(oper))
return _raise
operators = dict((oper, _not_implemented(oper))
for oper in ('+', '-', '*', '/', '**'))
operators['&'] = operator.and_
# Reverse the order of compositions
operators['|'] = lambda x, y: operator.or_(y, x)
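        # e.g. (f | g).inverse evaluates as g.inverse | f.inverse, while
        # (f & g).inverse evaluates as f.inverse & g.inverse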
def getter(idx, model):
try:
# By indexing on self[] this will return an instance of the
# model, with all the appropriate parameters set, which is
# currently required to return an inverse
return self[idx].inverse
except NotImplementedError:
raise NotImplementedError(
"All models in a composite model must have an inverse "
"defined in order for the composite model to have an "
"inverse. {0!r} does not have an inverse.".format(model))
return self._tree.evaluate(operators, getter=getter)
@sharedmethod
def _get_submodels(self):
return self.__class__._get_submodels()
def _parameter_units_for_data_units(self, input_units, output_units):
units_for_data = {}
for imodel, model in enumerate(self._submodels):
units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)
for param_sub in units_for_data_sub:
param = self._param_map_inverse[(imodel, param_sub)]
units_for_data[param] = units_for_data_sub[param_sub]
return units_for_data
def deepcopy(self):
"""
Return a deep copy of a compound model.
"""
new_model = self.copy()
new_model._submodels = [model.deepcopy() for model in self._submodels]
return new_model
def custom_model(*args, fit_deriv=None, **kwargs):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
Parameters
----------
func : function
Function which defines the model. It should take N positional
        arguments, where ``N`` is the number of dimensions of the model (the
        number of independent variables in the model), and any number of
        keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if kwargs:
warnings.warn(
"Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
"{0} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any).".format(__name__))
def _custom_model_wrapper(func, fit_deriv=None):
"""
    Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable "
"object")
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other "
"callable object")
model_name = func.__name__
inputs, params = get_inputs_and_params(func)
if (fit_deriv is not None and
len(fit_deriv.__defaults__) != len(params)):
raise ModelDefinitionError("derivative function should accept "
"same number of parameters as func.")
# TODO: Maybe have a clever scheme for default output name?
if inputs:
output_names = (inputs[0].name,)
else:
output_names = ('x',)
params = dict((param.name, Parameter(param.name, default=param.default))
for param in params)
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
members = {
'__module__': str(modname),
'__doc__': func.__doc__,
'inputs': tuple(x.name for x in inputs),
'outputs': output_names,
'evaluate': staticmethod(func),
}
if fit_deriv is not None:
members['fit_deriv'] = staticmethod(fit_deriv)
members.update(params)
return type(model_name, (FittableModel,), members)
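# A minimal sketch (illustrative, not part of the public API) of what the
# generated class is roughly equivalent to, for
# ``def line(x, slope=1., intercept=0.)``:
#
#     class line(FittableModel):
#         inputs = ('x',)
#         outputs = ('x',)
#         slope = Parameter(default=1.)
#         intercept = Parameter(default=0.)
#         evaluate = staticmethod(line)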
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from ``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`bounding-boxes`
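
    A minimal usage sketch (illustrative only; the model and shapes are
    assumptions)::

        import numpy as np
        from astropy.modeling.models import Gaussian2D

        model = Gaussian2D(x_mean=5, y_mean=5, x_stddev=1, y_stddev=1)
        model.bounding_box = ((3, 7), (3, 7))  # ((y_low, y_high), (x_low, x_high))
        image = render_model(model, arr=np.zeros((10, 10)))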
"""
bbox = model.bounding_box
    if coords is None and arr is None and bbox is None:
raise ValueError('If no bounding_box is set, coords or arr must be input.')
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError('number of array dimensions inconsistent with '
'number of model inputs.')
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError('coordinate length inconsistent with the number '
'of model inputs.')
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError('coordinate shape inconsistent with the '
'array shape.')
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
        # ensures the position is at the center pixel, which is important
        # when using add_array
pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError('The `bounding_box` is larger than the input'
' arr in one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def _prepare_inputs_single_model(model, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
        # Ensure that array scalars are always upgraded to 1-D arrays for the
        # sake of consistency with how parameters work. They will be cast
        # back to scalars at the end.
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if model.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if model.n_outputs > model.n_inputs:
if len(set(broadcasts)) > 1:
raise ValueError(
"For models with n_outputs > n_inputs, the combination of "
"all inputs and parameters must broadcast to the same shape, "
"which will be used as the shape of all outputs. In this "
"case some of the inputs had different shapes, so it is "
"ambiguous how to format outputs for this model. Try using "
"inputs that are all the same size and shape.")
else:
# Extend the broadcasts list to include shapes for all outputs
extra_outputs = model.n_outputs - model.n_inputs
if not broadcasts:
            # If there were no inputs then the broadcasts list is empty;
            # just add a None since no broadcasting of outputs against
            # inputs is necessary (see _prepare_outputs_single_model).
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
def _prepare_outputs_single_model(model, outputs, format_info):
broadcasts = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
broadcast_shape = broadcasts[idx]
if broadcast_shape is not None:
if not broadcast_shape:
# Shape is (), i.e. a scalar should be returned
outputs[idx] = output.item()
else:
outputs[idx] = output.reshape(broadcast_shape)
return tuple(outputs)
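# A minimal sketch of the scalar round-trip handled above (illustrative):
# a scalar input is upgraded to a 1-element array in
# _prepare_inputs_single_model, and the corresponding () broadcast shape
# causes the output to be cast back to a scalar here via output.item().
#
#     from astropy.modeling.models import Gaussian1D
#     Gaussian1D()(0.0)   # scalar in -> scalar out: 1.0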
def _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,
**kwargs):
reshaped = []
pivots = []
for idx, _input in enumerate(inputs):
max_param_shape = ()
if n_models > 1 and model_set_axis is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (_input.shape[:model_set_axis] +
_input.shape[model_set_axis + 1:])
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(input_shape, param.shape)
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(param.shape) > len(max_param_shape):
max_param_shape = param.shape
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model.model_set_axis
else:
pivot = input_ndim - len(max_param_shape)
new_shape = (_input.shape[:pivot] + (1,) +
_input.shape[pivot:])
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = model.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (_input.shape[:pivot + 1] + new_axes +
_input.shape[pivot + 1:])
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis,
pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if model.n_inputs < model.n_outputs:
pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))
return reshaped, (pivots,)
def _prepare_outputs_model_set(model, outputs, format_info, model_set_axis):
pivots = format_info[0]
# If model_set_axis = False was passed then use
# model._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = model.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model_set_axis)
return tuple(outputs)
def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
validate_broadcasting):
"""
Perform basic validation of model inputs--that they are mutually
broadcastable and that they have the minimum dimensions for the given
model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = n_models > 1 and model_set_axis is not False
if not (validate_broadcasting or check_model_set_axis):
# Nothing else needed here
return
all_shapes = []
for idx, _input in enumerate(inputs):
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
"For model_set_axis={0}, all inputs must be at "
"least {1}-dimensional.".format(
model_set_axis, model_set_axis + 1))
elif input_shape[model_set_axis] != n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
"Input argument {0!r} does not have the correct "
"dimensions in model_set_axis={1} for a model set with "
"n_models={2}.".format(argname, model_set_axis,
n_models))
all_shapes.append(input_shape)
if not validate_broadcasting:
return
try:
input_broadcast = check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
arg_a = argnames[shape_a_idx]
arg_b = argnames[shape_b_idx]
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot "
"be broadcast with input {2!r} of shape {3!r}".format(
arg_a, shape_a, arg_b, shape_b))
return input_broadcast
copyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)
copyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)
|
164fc98caf7232b739b43be3d4678a6571086f9051f150ea4d1c29139ae4db6f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model and functions related to blackbody radiation.
.. _blackbody-planck-law:
Blackbody Radiation
-------------------
Blackbody flux is calculated with the Planck law
(:ref:`Rybicki & Lightman 1979 <ref-rybicki1979>`):
.. math::
B_{\\lambda}(T) = \\frac{2 h c^{2} / \\lambda^{5}}{exp(h c / \\lambda k T) - 1}
B_{\\nu}(T) = \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
where the unit of :math:`B_{\\lambda}(T)` is
:math:`erg \\; s^{-1} cm^{-2} \\mathring{A}^{-1} sr^{-1}`, and
:math:`B_{\\nu}(T)` is :math:`erg \\; s^{-1} cm^{-2} Hz^{-1} sr^{-1}`.
:func:`~astropy.modeling.blackbody.blackbody_lambda` and
:func:`~astropy.modeling.blackbody.blackbody_nu` calculate the
blackbody flux for :math:`B_{\\lambda}(T)` and :math:`B_{\\nu}(T)`,
respectively.
For blackbody representation as a model, see :class:`BlackBody1D`.
.. _blackbody-examples:
Examples
^^^^^^^^
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_lambda, blackbody_nu
Calculate blackbody flux for 5000 K at 100 and 10000 Angstrom while suppressing
any Numpy warnings:
>>> wavelengths = [100, 10000] * u.AA
>>> temperature = 5000 * u.K
>>> with np.errstate(all='ignore'):
... flux_lam = blackbody_lambda(wavelengths, temperature)
... flux_nu = blackbody_nu(wavelengths, temperature)
>>> flux_lam # doctest: +FLOAT_CMP
<Quantity [ 1.27452545e-108, 7.10190526e+005] erg / (Angstrom cm2 s sr)>
>>> flux_nu # doctest: +FLOAT_CMP
<Quantity [ 4.25135927e-123, 2.36894060e-005] erg / (cm2 Hz s sr)>
Alternatively, the same results for ``flux_nu`` can be computed using
:class:`BlackBody1D`, which represents the blackbody as a model. This
approach requires one additional step, computing the bolometric flux:
>>> from astropy import constants as const
>>> from astropy.modeling import models
>>> temperature = 5000 * u.K
>>> bolometric_flux = const.sigma_sb * temperature ** 4 / np.pi
>>> bolometric_flux.to(u.erg / (u.cm * u.cm * u.s)) # doctest: +FLOAT_CMP
<Quantity 1.12808367e+10 erg / (cm2 s)>
>>> wavelengths = [100, 10000] * u.AA
>>> bb_astro = models.BlackBody1D(temperature, bolometric_flux=bolometric_flux)
>>> bb_astro(wavelengths).to(u.erg / (u.cm * u.cm * u.Hz * u.s)) / u.sr # doctest: +FLOAT_CMP
<Quantity [4.25102471e-123, 2.36893879e-005] erg / (cm2 Hz s sr)>
where ``bb_astro(wavelengths)`` computes a result equivalent to ``flux_nu`` above.
Plot a blackbody spectrum for 5000 K:
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.modeling.blackbody import blackbody_lambda
temperature = 5000 * u.K
wavemax = (const.b_wien / temperature).to(u.AA) # Wien's displacement law
waveset = np.logspace(
0, np.log10(wavemax.value + 10 * wavemax.value), num=1000) * u.AA
with np.errstate(all='ignore'):
flux = blackbody_lambda(waveset, temperature)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(waveset.value, flux.value)
ax.axvline(wavemax.value, ls='--')
ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
ax.set_xlabel(r'$\\lambda$ ({0})'.format(waveset.unit))
ax.set_ylabel(r'$B_{\\lambda}(T)$')
ax.set_title('Blackbody, T = {0}'.format(temperature))
Note that an array of temperatures can also be given instead of a single
temperature. In this case, the Numpy broadcasting rules apply: for instance, if
the frequency and temperature have the same shape, the output will have this
shape too, while if the frequency is a 2-d array with shape ``(n, m)`` and the
temperature is an array with shape ``(m,)``, the output will have a shape
``(n, m)``.
See Also
^^^^^^^^
.. _ref-rybicki1979:
Rybicki, G. B., & Lightman, A. P. 1979, Radiative Processes in Astrophysics (New York, NY: Wiley)
"""
import warnings
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['BlackBody1D', 'blackbody_nu', 'blackbody_lambda']
# Units
FNU = u.erg / (u.cm**2 * u.s * u.Hz)
FLAM = u.erg / (u.cm**2 * u.s * u.AA)
# Some platform implementations of expm1() are buggy and Numpy uses
# them anyway--the bug is that on certain large inputs it returns
# NaN instead of INF like it should (it should only return NaN on a
# NaN input).
# See https://github.com/astropy/astropy/issues/4171
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
_has_buggy_expm1 = np.isnan(np.expm1(1000)) or np.isnan(np.expm1(1e10))
class BlackBody1D(Fittable1DModel):
"""
One dimensional blackbody model.
Parameters
----------
temperature : :class:`~astropy.units.Quantity`
Blackbody temperature.
bolometric_flux : :class:`~astropy.units.Quantity`
The bolometric flux of the blackbody (i.e., the integral over the
spectral axis).
Notes
-----
Model formula:
.. math:: f(x) = \\pi B_{\\nu} f_{\\text{bolometric}} / (\\sigma T^{4})
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody1D()
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.3585381201978953e-15 erg / (cm2 Hz s)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody1D
from astropy.modeling.blackbody import FLAM
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody1D(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav).to(FLAM, u.spectral_density(wav))
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.lambda_max.to(u.AA).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a bolometric flux. The
# bolometric flux is the integral of the model over the spectral axis. This
# is more useful than simply having an amplitude parameter.
temperature = Parameter(default=5000, min=0, unit=u.K)
bolometric_flux = Parameter(default=1, min=0, unit=u.erg / u.cm ** 2 / u.s)
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz.
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {'x': u.spectral()}
def evaluate(self, x, temperature, bolometric_flux):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz.
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
bolometric_flux : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Desired integral for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``bolometric_flux``.
"""
# We need to make sure that we attach units to the temperature if it
# doesn't have any units. We do this because even though blackbody_nu
# can take temperature values without units, the / temperature ** 4
# factor needs units to be defined.
if isinstance(temperature, u.Quantity):
temperature = temperature.to(u.K, equivalencies=u.temperature())
else:
temperature = u.Quantity(temperature, u.K)
# We normalize the returned blackbody so that the integral would be
# unity, and we then multiply by the bolometric flux. A normalized
# blackbody has f_nu = pi * B_nu / (sigma * T^4), which is what we
# calculate here. We convert to 1/Hz to make sure the units are
# simplified as much as possible, then we multiply by the bolometric
# flux to get the normalization right.
fnu = ((np.pi * u.sr * blackbody_nu(x, temperature) /
const.sigma_sb / temperature ** 4).to(1 / u.Hz) *
bolometric_flux)
# If the bolometric_flux parameter has no unit, we should drop the /Hz
# and return a unitless value. This occurs for instance during fitting,
# since we drop the units temporarily.
if hasattr(bolometric_flux, 'unit'):
return fnu
else:
return fnu.value
@property
def input_units(self):
# The input units are those of the 'x' value, which should always be
# Hz. Because we do this, and because input_units_allow_dimensionless
# is set to True, dimensionless values are assumed to be in Hz.
return {'x': u.Hz}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('temperature', u.K),
('bolometric_flux', outputs_unit['y'] * u.Hz)])
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
def blackbody_nu(in_x, temperature):
"""Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Hz.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(temperature, u.K, dtype=np.float64)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError('Temperature should be positive: {0}'.format(temp))
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn('Input contains invalid wavelength/frequency value(s)',
AstropyUserWarning)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
if _has_buggy_expm1:
# Replace incorrect nan results with infs--any result of 'nan' is
# incorrect unless the input (in log_boltz) happened to be nan to begin
# with. (As noted in #4393 ideally this would be replaced by a version
# of expm1 that doesn't have this bug, rather than fixing incorrect
# results after the fact...)
boltzm1_nans = np.isnan(boltzm1)
if np.any(boltzm1_nans):
if boltzm1.isscalar and not np.isnan(log_boltz):
boltzm1 = np.inf
else:
boltzm1[np.where(~np.isnan(log_boltz) & boltzm1_nans)] = np.inf
# Calculate blackbody flux
bb_nu = (2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1))
flux = bb_nu.to(FNU, u.spectral_density(freq))
return flux / u.sr # Add per steradian to output flux unit
def blackbody_lambda(in_x, temperature):
"""Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Angstrom.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`.
"""
if getattr(in_x, 'unit', None) is None:
in_x = u.Quantity(in_x, u.AA)
bb_nu = blackbody_nu(in_x, temperature) * u.sr # Remove sr for conversion
flux = bb_nu.to(FLAM, u.spectral_density(in_x))
return flux / u.sr # Add per steradian to output flux unit
|
13676feda99e5156a71211c44f73a921cb448f7fbdd2f0a3355ebcc86b5f0e0d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Optimization algorithms used in `~astropy.modeling.fitting`.
"""
import warnings
import abc
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["Optimization", "SLSQP", "Simplex"]
# Maximum number of iterations
DEFAULT_MAXITER = 100
# Step for the forward difference approximation of the Jacobian
DEFAULT_EPS = np.sqrt(np.finfo(float).eps)
# Default requested accuracy
DEFAULT_ACC = 1e-07
DEFAULT_BOUNDS = (-10 ** 12, 10 ** 12)
class Optimization(metaclass=abc.ABCMeta):
"""
Base class for optimizers.
Parameters
----------
opt_method : callable
Implements optimization method
Notes
-----
The base Optimizer does not support any constraints by default; individual
optimizers should explicitly set this list to the specific constraints
    they support.
"""
supported_constraints = []
def __init__(self, opt_method):
self._opt_method = opt_method
self._maxiter = DEFAULT_MAXITER
self._eps = DEFAULT_EPS
self._acc = DEFAULT_ACC
@property
def maxiter(self):
"""Maximum number of iterations"""
return self._maxiter
@maxiter.setter
def maxiter(self, val):
"""Set maxiter"""
self._maxiter = val
@property
def eps(self):
"""Step for the forward difference approximation of the Jacobian"""
return self._eps
@eps.setter
def eps(self, val):
"""Set eps value"""
self._eps = val
@property
def acc(self):
"""Requested accuracy"""
return self._acc
@acc.setter
def acc(self, val):
"""Set accuracy"""
self._acc = val
def __repr__(self):
fmt = "{0}()".format(self.__class__.__name__)
return fmt
@property
def opt_method(self):
return self._opt_method
@abc.abstractmethod
def __call__(self):
raise NotImplementedError("Subclasses should implement this method")
class SLSQP(Optimization):
"""
Sequential Least Squares Programming optimization algorithm.
The algorithm is described in [1]_. It supports tied and fixed
parameters, as well as bounded constraints. Uses
`scipy.optimize.fmin_slsqp`.
References
----------
.. [1] http://www.netlib.org/toms/733
"""
supported_constraints = ['bounds', 'eqcons', 'ineqcons', 'fixed', 'tied']
def __init__(self):
from scipy.optimize import fmin_slsqp
super().__init__(fmin_slsqp)
self.fit_info = {
'final_func_val': None,
'numiter': None,
'exit_mode': None,
'message': None
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
        objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
kwargs['iter'] = kwargs.pop('maxiter', self._maxiter)
if 'epsilon' not in kwargs:
kwargs['epsilon'] = self._eps
if 'acc' not in kwargs:
kwargs['acc'] = self._acc
# Get the verbosity level
disp = kwargs.pop('verblevel', None)
# set the values of constraints to match the requirements of fmin_slsqp
model = fargs[0]
pars = [getattr(model, name) for name in model.param_names]
bounds = [par.bounds for par in pars if not (par.fixed or par.tied)]
bounds = np.asarray(bounds)
for i in bounds:
if i[0] is None:
i[0] = DEFAULT_BOUNDS[0]
if i[1] is None:
i[1] = DEFAULT_BOUNDS[1]
# older versions of scipy require this array to be float
bounds = np.asarray(bounds, dtype=float)
eqcons = np.array(model.eqcons)
ineqcons = np.array(model.ineqcons)
fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method(
objfunc, initval, args=fargs, full_output=True, disp=disp,
bounds=bounds, eqcons=eqcons, ieqcons=ineqcons,
**kwargs)
self.fit_info['final_func_val'] = final_func_val
self.fit_info['numiter'] = numiter
self.fit_info['exit_mode'] = exit_mode
self.fit_info['message'] = mess
if exit_mode != 0:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
return fitparams, self.fit_info
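# A minimal sketch of how a fitter drives this optimizer (illustrative; the
# objective function follows the ``objfunc(params, *fargs)`` convention used
# by the fitters, and ``fargs[0]`` must be the model instance, as assumed in
# __call__ above):
#
#     opt = SLSQP()
#     opt.maxiter = 200
#     fitparams, fit_info = opt(objfunc, initial_params,
#                               (model, weights, x, y))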
class Simplex(Optimization):
"""
    Nelder-Mead (downhill simplex) algorithm.
This algorithm [1]_ only uses function values, not derivatives.
Uses `scipy.optimize.fmin`.
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
"""
supported_constraints = ['bounds', 'fixed', 'tied']
def __init__(self):
from scipy.optimize import fmin as simplex
super().__init__(simplex)
self.fit_info = {
'final_func_val': None,
'numiter': None,
'exit_mode': None,
'num_function_calls': None
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
        objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
if 'maxiter' not in kwargs:
kwargs['maxiter'] = self._maxiter
if 'acc' in kwargs:
self._acc = kwargs['acc']
kwargs.pop('acc')
if 'xtol' in kwargs:
self._acc = kwargs['xtol']
kwargs.pop('xtol')
# Get the verbosity level
disp = kwargs.pop('verblevel', None)
fitparams, final_func_val, numiter, funcalls, exit_mode = self.opt_method(
objfunc, initval, args=fargs, xtol=self._acc, disp=disp,
full_output=True, **kwargs)
self.fit_info['final_func_val'] = final_func_val
self.fit_info['numiter'] = numiter
self.fit_info['exit_mode'] = exit_mode
self.fit_info['num_function_calls'] = funcalls
if self.fit_info['exit_mode'] == 1:
warnings.warn("The fit may be unsuccessful; "
"Maximum number of function evaluations reached.",
AstropyUserWarning)
if self.fit_info['exit_mode'] == 2:
warnings.warn("The fit may be unsuccessful; "
"Maximum number of iterations reached.",
AstropyUserWarning)
return fitparams, self.fit_info
|
73a618fcfe30190732bd1f58110b865d361cdcf42afd4bf573f357fe56063c4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Implements rotations, including spherical rotations as defined in WCS Paper II
[1]_
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import math
import numpy as np
from .core import Model
from .parameters import Parameter
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
from astropy import units as u
from astropy.utils.decorators import deprecated
from .utils import _to_radian, _to_orig_unit
__all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D',
'EulerAngleRotation']
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def _create_matrix(self, phi, theta, psi, axes_order):
matrices = []
for angle, axis in zip([phi, theta, psi], axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = angle.item()
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
result = matrix_product(*matrices[::-1])
return result
@staticmethod
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
@staticmethod
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
@deprecated(2.0)
@staticmethod
def rotation_matrix_from_angle(angle):
"""
Clockwise rotation matrix.
Parameters
----------
angle : float
Rotation angle in radians.
"""
return np.array([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
        if isinstance(alpha, np.ndarray) and alpha.ndim == 2:
            shape = alpha.shape
            alpha = alpha.flatten()
            delta = delta.flatten()
inp = self.spherical2cartesian(alpha, delta)
matrix = self._create_matrix(phi, theta, psi, axes_order)
result = np.dot(matrix, inp)
a, b = self.cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
""" Input units. """
return {'alpha': u.deg, 'delta': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha': u.deg, 'delta': u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
    determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity`
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
inputs = ('alpha', 'delta')
outputs = ('alpha', 'delta')
phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ['x', 'y', 'z']
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3,"
"got {0}".format(axes_order))
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError("Unrecognized axis label {0}; "
"should be one of {1} ".format(unrecognized, self.axes))
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
    @property
    def inverse(self):
return self.__class__(phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1])
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
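# A minimal usage sketch (the angle values are illustrative; inputs and
# outputs are spherical angles in degrees):
#
#     rotation = EulerAngleRotation(23, 14, 2.3, axes_order='zxz')
#     alpha, delta = rotation(1.0, 2.0)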
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = 'zxz'
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole,
self.axes_order)
mask = alpha < 0
        if isinstance(mask, np.ndarray):
            alpha[mask] += 360
        elif mask:
            alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
    lon : float or `~astropy.units.Quantity`
        Celestial longitude of the fiducial point.
    lat : float or `~astropy.units.Quantity`
        Celestial latitude of the fiducial point.
    lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the native sphere
inputs = ('phi_N', 'theta_N')
#: Outputs are angles on the celestial sphere
outputs = ('alpha_C', 'delta_C')
@property
def input_units(self):
""" Input units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles in the Native coordinate system.
        lon, lat, lon_pole : float (deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles on the Celestial sphere.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = - (np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
    lon : float or `~astropy.units.Quantity`
        Celestial longitude of the fiducial point.
    lat : float or `~astropy.units.Quantity`
        Celestial latitude of the fiducial point.
    lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the celestial sphere
inputs = ('alpha_C', 'delta_C')
#: Outputs are angles on the native sphere
outputs = ('phi_N', 'theta_N')
@property
def input_units(self):
""" Input units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
@property
def return_units(self):
""" Output units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles in the Celestial coordinate frame.
lon, lat, lon_pole : float (deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles on the Native sphere.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = (np.pi / 2 + lon)
theta = (np.pi / 2 - lat)
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation and vice-versa.
Parameters
----------
angle : float or `~astropy.units.Quantity`
Angle of rotation (if float it should be in deg).
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
_separable = False
angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : ndarray-like
Input quantities
angle : float (deg) or `~astropy.units.Quantity`
Angle of rotations.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, 'unit', None)
y_unit = getattr(y, 'unit', None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit), u.Quantity(y, unit=y_unit)
else:
return x, y
@staticmethod
def _compute_matrix(angle):
return np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]],
dtype=np.float64)
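# A minimal usage sketch (illustrative): rotating the point (1, 0) by 90
# degrees counter-clockwise gives approximately (0, 1):
#
#     rotation = Rotation2D(angle=90)
#     x, y = rotation(1, 0)   # x ~ 0 (to within floating point), y == 1.0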
|
5430ec4e328b32ed8fc6fba01c572105931718c7adf4dfe933ad5040084fbd27 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
from distutils import log
from astropy_helpers import setup_helpers, utils
from astropy_helpers.version_helpers import get_pkg_version_module
wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py'))
MODELING_ROOT = os.path.relpath(os.path.dirname(__file__))
MODELING_SRC = join(MODELING_ROOT, 'src')
SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'),
__file__]
GEN_FILES = [join(MODELING_SRC, 'projections.c')]
# This defines the set of projection functions that we want to wrap.
# The key is the projection name, and the value is the number of
# parameters.
# (These are in the order that they appear in the WCS coordinate
# systems paper).
projections = {
'azp': 2,
'szp': 3,
'tan': 0,
'stg': 0,
'sin': 2,
'arc': 0,
'zea': 0,
'air': 1,
'cyp': 2,
'cea': 1,
'mer': 0,
'sfl': 0,
'par': 0,
'mol': 0,
'ait': 0,
'cop': 2,
'coe': 2,
'cod': 2,
'coo': 2,
'bon': 1,
'pco': 0,
'tsc': 0,
'csc': 0,
'qsc': 0,
'hpx': 2,
'xph': 0,
}
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
# TODO: Move this to setup_helpers
# Generating the wcslib wrappers should only be done if needed. This also
# ensures that it is not done for any release tarball since those will
    # include the generated projections.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = get_pkg_version_module('astropy')
if gen_mtime > src_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in '
'astropy.modeling._projections seem to be older '
'than the source templates used to create '
'them. Because this is a release version we will '
'use them anyway, but this might be a sign of '
'some sort of version mismatch or other '
'tampering. Or it might just mean you moved '
'some files around or otherwise accidentally '
'changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"modeling _projections.c file will be used")
return
from jinja2 import Environment, FileSystemLoader
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(MODELING_SRC))
c_in = env.get_template('projections.c.templ')
c_out = c_in.render(projections=projections)
with open(join(MODELING_SRC, 'projections.c'), 'w') as fd:
fd.write(c_out)
def get_extensions():
wcslib_files = [ # List of wcslib files to compile
'prj.c',
'wcserr.c',
'wcsprintf.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(MODELING_SRC, 'wcsconfig.h')
]
cfg = setup_helpers.DistutilsExtensionArgs()
wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(MODELING_SRC)
astropy_files = [ # List of astropy.modeling files to compile
'projections.c'
]
cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension(str('astropy.modeling._projections'), **cfg)]
|
fd693409195f2efc647e91dffedba0bc2f43c2505c52952574dd09a7aad6f50e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to determine if a model is separable, i.e.
if the model outputs are independent.
It analyzes ``n_inputs``, ``n_outputs`` and the operators
in a compound model by stepping through the transforms
and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``).
Each modeling operator is represented by a function which
takes two simple models (or two ``coord_matrix`` arrays) and
returns an array of shape (``n_outputs``, ``n_inputs``).
"""
import numpy as np
from .core import Model, _CompoundModel, ModelDefinitionError
from .mappings import Mapping
__all__ = ["is_separable", "separability_matrix"]
def is_separable(transform):
"""
A separability test for the outputs of a transform.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
is_separable : ndarray
A boolean array with size ``transform.n_outputs`` where
each element indicates whether the output is independent
and the result of a separable transform.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([ True, True]...)
>>> is_separable(Shift(1) & Shift(2) | Rotation2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([ True, True, True, True]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
is_separable = np.array([False] * transform.n_outputs).T
return is_separable
separable_matrix = _separable(transform)
is_separable = separable_matrix.sum(1)
is_separable = np.where(is_separable != 1, False, True)
return is_separable
def separability_matrix(transform):
"""
Compute the correlation between outputs and inputs.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
separable_matrix : ndarray
A boolean correlation matrix of shape (n_outputs, n_inputs).
Indicates the dependence of outputs on inputs. For completely
independent outputs, the diagonal elements are True and
off-diagonal elements are False.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([[ True, False], [False, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([[ True, False], [False, True], [ True, False], [False, True]]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
return np.ones((transform.n_outputs, transform.n_inputs),
dtype=np.bool)
separable_matrix = _separable(transform)
separable_matrix = np.where(separable_matrix != 0, True, False)
return separable_matrix
def _compute_n_outputs(left, right):
"""
Compute the number of outputs of two models.
The two models are the left and right model to an operation in
the expression tree of a compound model.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
"""
if isinstance(left, Model):
lnout = left.n_outputs
else:
lnout = left.shape[0]
if isinstance(right, Model):
rnout = right.n_outputs
else:
rnout = right.shape[0]
noutp = lnout + rnout
return noutp
def _arith_oper(left, right):
"""
Function corresponding to one of the arithmetic operators
    ['+', '-', '*', '/', '**'].
This always returns a nonseparable output.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
# models have the same number of inputs and outputs
def _n_inputs_outputs(input):
if isinstance(input, Model):
n_outputs, n_inputs = input.n_outputs, input.n_inputs
else:
n_outputs, n_inputs = input.shape
return n_inputs, n_outputs
left_inputs, left_outputs = _n_inputs_outputs(left)
right_inputs, right_outputs = _n_inputs_outputs(right)
if left_inputs != right_inputs or left_outputs != right_outputs:
raise ModelDefinitionError(
"Unsupported operands for arithmetic operator: left (n_inputs={0}, "
"n_outputs={1}) and right (n_inputs={2}, n_outputs={3}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator.".format(
left_inputs, left_outputs, right_inputs, right_outputs))
result = np.ones((left_outputs, left_inputs))
return result
def _coord_matrix(model, pos, noutp):
"""
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child.
"""
if isinstance(model, Mapping):
axes = []
for i in model.mapping:
axis = np.zeros((model.n_inputs,))
axis[i] = 1
axes.append(axis)
m = np.vstack(axes)
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[: model.n_outputs, :model.n_inputs] = m
else:
mat[-model.n_outputs:, -model.n_inputs:] = m
return mat
if not model.separable:
# this does not work for more than 2 coordinates
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[:model.n_outputs, : model.n_inputs] = 1
else:
mat[-model.n_outputs:, -model.n_inputs:] = 1
else:
mat = np.zeros((noutp, model.n_inputs))
for i in range(model.n_inputs):
mat[i, i] = 1
if pos == 'right':
mat = np.roll(mat, (noutp - model.n_outputs))
return mat
def _cstack(left, right):
"""
Function corresponding to '&' operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
noutp = _compute_n_outputs(left, right)
if isinstance(left, Model):
cleft = _coord_matrix(left, 'left', noutp)
else:
cleft = np.zeros((noutp, left.shape[1]))
cleft[: left.shape[0], : left.shape[1]] = left
if isinstance(right, Model):
cright = _coord_matrix(right, 'right', noutp)
else:
cright = np.zeros((noutp, right.shape[1]))
        cright[-right.shape[0]:, -right.shape[1]:] = right
return np.hstack([cleft, cright])
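# For example (a sketch): for Shift(1) & Rotation2D(2), _cstack places the
# left operand's coord_matrix in the upper-left block and the right
# operand's in the lower-right block, giving
#
#     [[1, 0, 0],     <- the Shift output depends only on input 0
#      [0, 1, 1],     <- the Rotation2D outputs couple inputs 1 and 2
#      [0, 1, 1]]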
def _cdot(left, right):
"""
Function corresponding to "|" operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is of an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
left, right = right, left
def _n_inputs_outputs(input, position):
"""
Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix.
"""
if isinstance(input, Model):
coords = _coord_matrix(input, position, input.n_outputs)
else:
coords = input
return coords
cleft = _n_inputs_outputs(left, 'left')
cright = _n_inputs_outputs(right, 'right')
try:
result = np.dot(cleft, cright)
except ValueError:
raise ModelDefinitionError(
'Models cannot be combined with the "|" operator; '
'left coord_matrix is {0}, right coord_matrix is {1}'.format(
cright, cleft))
return result
def _separable(transform):
"""
Calculate the separability of outputs.
Parameters
----------
transform : `astropy.modeling.Model`
A transform (usually a compound model).
Returns
-------
    is_separable : ndarray
        A coordinate matrix of shape (transform.n_outputs, transform.n_inputs)
        where each element indicates the dependence of the corresponding
        output on each input.
"""
if isinstance(transform, _CompoundModel):
is_separable = transform._tree.evaluate(_operators)
elif isinstance(transform, Model):
is_separable = _coord_matrix(transform, 'left', transform.n_outputs)
return is_separable
# Maps modeling operators to functions that compute the relationship
# between axes, represented as an array of 0s and 1s.
_operators = {'&': _cstack, '|': _cdot, '+': _arith_oper, '-': _arith_oper,
'*': _arith_oper, '/': _arith_oper, '**': _arith_oper}
|
385b9a625c19dc3c9aed80b57a3d808c30a375841372eb82d3ad382714cf995b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides utility functions for the models package
"""
from collections import deque
from collections.abc import MutableMapping
from inspect import signature
import numpy as np
from astropy.utils import isiterable, check_broadcast
from astropy.utils.compat import NUMPY_LT_1_14
from astropy import units as u
__all__ = ['ExpressionTree', 'AliasDict', 'check_broadcast',
'poly_map_domain', 'comb', 'ellipse_extent']
class ExpressionTree:
__slots__ = ['left', 'right', 'value', 'inputs', 'outputs']
def __init__(self, value, left=None, right=None, inputs=None, outputs=None):
self.value = value
self.inputs = inputs
self.outputs = outputs
self.left = left
# Two subtrees can't be the same *object* or else traverse_postorder
# breaks, so we just always copy the right subtree to subvert that.
if right is not None and left is right:
right = right.copy()
self.right = right
def __getstate__(self):
# For some reason the default pickle protocol on Python 2 does not just
# do this. On Python 3 it's not a problem.
return dict((slot, getattr(self, slot)) for slot in self.__slots__)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, ExpressionTree):
return adict[key]
else:
return branch, key
@property
def inputs_map(self):
"""
Map the names of the inputs to this ExpressionTree to the inputs to the leaf models.
"""
inputs_map = {}
if not isinstance(self.value, str): # If we don't have an operator the mapping is trivial
return {inp: (self.value, inp) for inp in self.inputs}
elif self.value == '|':
l_inputs_map = self.left.inputs_map
for inp in self.inputs:
m, inp2 = self._recursive_lookup(self.left, l_inputs_map, inp)
inputs_map[inp] = m, inp2
elif self.value == '&':
l_inputs_map = self.left.inputs_map
r_inputs_map = self.right.inputs_map
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
m, inp2 = self._recursive_lookup(self.left,
l_inputs_map,
self.left.inputs[i])
inputs_map[inp] = m, inp2
else: # Get from right
m, inp2 = self._recursive_lookup(self.right,
r_inputs_map,
self.right.inputs[i - len(self.left.inputs)])
inputs_map[inp] = m, inp2
else:
l_inputs_map = self.left.inputs_map
for inp in self.left.inputs:
m, inp2 = self._recursive_lookup(self.left, l_inputs_map, inp)
inputs_map[inp] = m, inp2
return inputs_map
@property
def outputs_map(self):
"""
Map the names of the outputs to this ExpressionTree to the outputs to the leaf models.
"""
outputs_map = {}
if not isinstance(self.value, str): # If we don't have an operator the mapping is trivial
return {out: (self.value, out) for out in self.outputs}
elif self.value == '|':
r_outputs_map = self.right.outputs_map
for out in self.outputs:
m, out2 = self._recursive_lookup(self.right, r_outputs_map, out)
outputs_map[out] = m, out2
elif self.value == '&':
r_outputs_map = self.right.outputs_map
l_outputs_map = self.left.outputs_map
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
m, out2 = self._recursive_lookup(self.left,
l_outputs_map,
self.left.outputs[i])
outputs_map[out] = m, out2
else: # Get from right
m, out2 = self._recursive_lookup(self.right,
r_outputs_map,
self.right.outputs[i - len(self.left.outputs)])
outputs_map[out] = m, out2
else:
l_outputs_map = self.left.outputs_map
for out in self.left.outputs:
m, out2 = self._recursive_lookup(self.left, l_outputs_map, out)
outputs_map[out] = m, out2
return outputs_map
@property
def isleaf(self):
return self.left is None and self.right is None
def traverse_preorder(self):
stack = deque([self])
while stack:
node = stack.pop()
yield node
if node.right is not None:
stack.append(node.right)
if node.left is not None:
stack.append(node.left)
def traverse_inorder(self):
stack = deque()
node = self
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node
node = node.right
def traverse_postorder(self):
stack = deque([self])
last = None
while stack:
node = stack[-1]
if last is None or node is last.left or node is last.right:
if node.left is not None:
stack.append(node.left)
elif node.right is not None:
stack.append(node.right)
elif node.left is last and node.right is not None:
stack.append(node.right)
else:
yield stack.pop()
last = node
def evaluate(self, operators, getter=None, start=0, stop=None):
"""Evaluate the expression represented by this tree.
``operators`` should be a dictionary mapping operator names ('tensor',
'product', etc.) to a function that implements that operator for the
correct number of operands.
If given, ``getter`` is a function evaluated on each *leaf* node's
value before applying the operator between them. This could be used,
for example, to operate on an attribute of the node values rather than
directly on the node values. The ``getter`` is passed both the index
of the leaf (a count starting at 0 that is incremented after each leaf
is found) and the leaf node's value.
The ``start`` and ``stop`` arguments allow evaluating a sub-expression
within the expression tree.
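As a minimal sketch, a tree representing ``1 + 2`` (plain integers are
used as leaf values here purely for illustration; the evaluation
machinery does not require the leaves to be models):
>>> from operator import add
>>> tree = ExpressionTree('+', ExpressionTree(1), ExpressionTree(2))
>>> tree.evaluate({'+': add})
3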
TODO: Document this better.
"""
stack = deque()
if getter is None:
getter = lambda idx, value: value
if start is None:
start = 0
leaf_idx = 0
for node in self.traverse_postorder():
if node.isleaf:
# For a "tree" containing just a single operator at the root
# Also push the index of this leaf onto the stack, which will
# prove useful for evaluating subexpressions
stack.append((getter(leaf_idx, node.value), leaf_idx))
leaf_idx += 1
else:
operator = operators[node.value]
if len(stack) < 2:
# Skip this operator if there are not enough operands on
# the stack; this can happen if some operands were skipped
# when evaluating a sub-expression
continue
right = stack.pop()
left = stack.pop()
operands = []
for operand in (left, right):
# idx is the leaf index; -1 if not a leaf node
if operand[-1] == -1:
operands.append(operand)
else:
operand, idx = operand
if start <= idx and (stop is None or idx < stop):
operands.append((operand, idx))
if len(operands) == 2:
# evaluate the operator with the given operands and place
# the result on the stack (with -1 for the "leaf index"
# since this result is not a leaf node)
left, right = operands
stack.append((operator(left[0], right[0]), -1))
elif len(operands) == 0:
# Just push the left one back on the stack
# TODO: Explain and/or refactor this better
# This is here because even if both operands were "skipped"
# due to being outside the (start, stop) range, we've only
# skipped one operator. But there should be at least 2
# operators involving these operands, so we push the one
# from the left back onto the stack so that the next
# operator will be skipped as well. Should probably come
# up with an easier to follow way to write this algorithm
stack.append(left)
else:
# one or more of the operands was not included in the
# sub-expression slice, so don't evaluate the operator;
# instead place left over operands (if any) back on the
# stack for later use
stack.extend(operands)
return stack.pop()[0]
def copy(self):
# Hopefully this won't blow the stack for any practical case; if such a
# case arises that this won't work then I suppose we can find an
# iterative approach.
children = []
for child in (self.left, self.right):
if isinstance(child, ExpressionTree):
children.append(child.copy())
else:
children.append(child)
return self.__class__(self.value, left=children[0], right=children[1])
def format_expression(self, operator_precedence, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: '[{0}]'.format(i)
for node in self.traverse_postorder():
if node.isleaf:
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
oper_order = operator_precedence[node.value]
right = operands.pop()
left = operands.pop()
if (node.left is not None and not node.left.isleaf and
operator_precedence[node.left.value] < oper_order):
left = '({0})'.format(left)
if (node.right is not None and not node.right.isleaf and
operator_precedence[node.right.value] < oper_order):
right = '({0})'.format(right)
operands.append(' '.join((left, node.value, right)))
return ''.join(operands)
class AliasDict(MutableMapping):
"""
Creates a `dict` like object that wraps an existing `dict` or other
`MutableMapping`, along with a `dict` of *key aliases* that translate
between specific keys in this dict to different keys in the underlying
dict.
In other words, keys that do not have an associated alias are accessed and
stored like a normal `dict`. However, a key that has an alias is accessed
and stored to the "parent" dict via the alias.
Parameters
----------
parent : dict-like
The parent `dict` that aliased keys are accessed from and stored to.
aliases : dict-like
Maps keys in this dict to their associated keys in the parent dict.
Examples
--------
>>> parent = {'a': 1, 'b': 2, 'c': 3}
>>> aliases = {'foo': 'a', 'bar': 'c'}
>>> alias_dict = AliasDict(parent, aliases)
>>> alias_dict['foo']
1
>>> alias_dict['bar']
3
Keys in the original parent dict are not visible if they were not
aliased::
>>> alias_dict['b']
Traceback (most recent call last):
...
KeyError: 'b'
Likewise, updates to aliased keys are reflected back in the parent dict::
>>> alias_dict['foo'] = 42
>>> alias_dict['foo']
42
>>> parent['a']
42
However, updates/insertions to keys that are *not* aliased are not
reflected in the parent dict::
>>> alias_dict['qux'] = 99
>>> alias_dict['qux']
99
>>> 'qux' in parent
False
In particular, updates on the `AliasDict` to a key that is equal to
one of the aliased keys in the parent dict does *not* update the parent
dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But
assigning to a key ``'a'`` on the `AliasDict` does not impact the
parent::
>>> alias_dict['a'] = 'nope'
>>> alias_dict['a']
'nope'
>>> parent['a']
42
"""
_store_type = dict
"""
Subclasses may override this to use other mapping types as the underlying
storage, for example an `OrderedDict`. However, even in this case
additional work may be needed to get things like the ordering right.
"""
def __init__(self, parent, aliases):
self._parent = parent
self._store = self._store_type()
self._aliases = dict(aliases)
def __getitem__(self, key):
if key in self._aliases:
try:
return self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
return self._store[key]
def __setitem__(self, key, value):
if key in self._aliases:
self._parent[self._aliases[key]] = value
else:
self._store[key] = value
def __delitem__(self, key):
if key in self._aliases:
try:
del self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
else:
del self._store[key]
def __iter__(self):
"""
First iterates over keys from the parent dict (if the aliased keys are
present in the parent), followed by any keys in the local store.
"""
for key, alias in self._aliases.items():
if alias in self._parent:
yield key
for key in self._store:
yield key
def __len__(self):
# TODO:
# This could be done more efficiently, but at present the use case for
# it is narrow if non-existent.
return len(list(iter(self)))
def __repr__(self):
# repr() just like any other dict--this should look transparent
store_copy = self._store_type()
for key, alias in self._aliases.items():
if alias in self._parent:
store_copy[key] = self._parent[alias]
store_copy.update(self._store)
return repr(store_copy)
class _BoundingBox(tuple):
"""
Base class for models with custom bounding box templates (methods that
return an actual bounding box tuple given some adjustable parameters--see
for example `~astropy.modeling.models.Gaussian1D.bounding_box`).
On these classes the ``bounding_box`` property still returns a `tuple`
giving the default bounding box for that instance of the model. But that
tuple may also be a subclass of this class that is callable, and allows
a new tuple to be returned using a user-supplied value for any adjustable
parameters to the bounding box.
"""
_model = None
def __new__(cls, input_, _model=None):
self = super().__new__(cls, input_)
if _model is not None:
# Bind this _BoundingBox (most likely a subclass) to a Model
# instance so that its __call__ can access the model
self._model = _model
return self
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters.")
@classmethod
def validate(cls, model, bounding_box):
"""
Validate a given bounding box sequence against the given model (which
may be either a subclass of `~astropy.modeling.Model` or an instance
thereof, so long as the ``.inputs`` attribute is defined).
Currently this just checks that the bounding_box is either a 2-tuple
of lower and upper bounds for 1-D models, or an N-tuple of 2-tuples
for N-D models.
This also returns a normalized version of the bounding_box input to
ensure it is always an N-tuple (even for the 1-D case).
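For example, using a minimal hypothetical stand-in object that provides
the two attributes this method relies on:
>>> class _FakeModel:
...     n_inputs = 1
...     name = 'fake'
>>> _BoundingBox.validate(_FakeModel, (-1, 1))
(-1, 1)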
"""
nd = model.n_inputs
if nd == 1:
MESSAGE = "Bounding box for {0} model must be a sequence of length "
"2 consisting of a lower and upper bound, or a 1-tuple "
"containing such a sequence as its sole element.".format(model.name)
try:
valid_shape = np.shape(bounding_box) in ((2,), (1, 2))
except TypeError:
# np.shape does not work with lists of Quantities
valid_shape = np.shape([b.to_value() for b in bounding_box]) in ((2,), (1, 2))
except ValueError:
raise ValueError(MESSAGE)
if not isiterable(bounding_box) or not valid_shape:
raise ValueError(MESSAGE)
if len(bounding_box) == 1:
return cls((tuple(bounding_box[0]),))
else:
return cls(tuple(bounding_box))
else:
MESSAGE = "Bounding box for {0} model must be a sequence of length "
"{1} (the number of model inputs) consisting of pairs of "
"lower and upper bounds for those inputs on which to "
"evaluate the model.".format(model.name, nd)
try:
valid_shape = all([len(i) == 2 for i in bounding_box])
except TypeError:
valid_shape = False
if len(bounding_box) != nd:
valid_shape = False
if not isiterable(bounding_box) or not valid_shape:
raise ValueError(MESSAGE)
return cls(tuple(bounds) for bounds in bounding_box)
def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` to the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
given operator is applied element-wise to the tuple outputs.
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: \
tuple(oper(x, y) for x, y in zip(f(inputs, params),
g(inputs, params)))
def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
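Examples
--------
As a quick illustration, mapping the domain [1, 3] onto the window
[-1, 1]:
>>> poly_map_domain([1, 2, 3], [1, 3], [-1, 1])
array([-1.,  0.,  1.])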
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx
def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
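Examples
--------
A quick sanity check (the result is a float because true division is
used internally):
>>> comb(5, 2)
10.0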
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val
def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
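For example (the exact separator spacing depends on the numpy version):
>>> import numpy as np
>>> array_repr_oneline(np.arange(4).reshape(2, 2))
'[[0, 1], [2, 3]]'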
"""
sep = ',' if NUMPY_LT_1_14 else ', '
r = np.array2string(array, separator=sep, suppress_small=True)
return ' '.join(l.strip() for l in r.splitlines())
def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
labels and '1' to the right-hand labels so there is no ambiguity.
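Examples
--------
Without and with a conflicting label:
>>> combine_labels(('x', 'y'), ('z',))
('x', 'y', 'z')
>>> combine_labels(('x', 'y'), ('x',))
('x0', 'y0', 'x1')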
"""
if set(left).intersection(right):
left = tuple(l + '0' for l in left)
right = tuple(r + '1' for r in right)
return left + right
def ellipse_extent(a, b, theta):
"""
Calculates the extent of a box encapsulating a rotated 2D ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
Major axis.
b : float or `~astropy.units.Quantity`
Minor axis.
theta : float or `~astropy.units.Quantity`
Rotation angle. If given as a floating-point value, it is assumed to be
in radians.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi/4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy]))
else:
return np.abs([dx, dy])
def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
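Examples
--------
A minimal illustration with a hypothetical function ``f``:
>>> def f(x, a=1, b=2):
...     return a * x + b
>>> inputs, params = get_inputs_and_params(f)
>>> [p.name for p in inputs], [p.name for p in params]
(['x'], ['a', 'b'])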
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params
def _parameter_with_unit(parameter, unit):
if parameter.unit is None:
return parameter.value * unit
else:
return parameter.quantity.to(unit)
def _parameter_without_unit(value, old_unit, new_unit):
if old_unit is None:
return value
else:
return value * old_unit.to(new_unit)
def _combine_equivalency_dict(keys, eq1=None, eq2=None):
# Given two dictionaries that give equivalencies for a set of keys, for
# example input value names, return a dictionary that includes all the
# equivalencies
eq = {}
for key in keys:
eq[key] = []
if eq1 is not None and key in eq1:
eq[key].extend(eq1[key])
if eq2 is not None and key in eq2:
eq[key].extend(eq2[key])
return eq
def _to_radian(value):
""" Convert ``value`` to radian. """
if isinstance(value, u.Quantity):
return value.to(u.rad)
else:
return np.deg2rad(value)
def _to_orig_unit(value, raw_unit=None, orig_unit=None):
""" Convert value with ``raw_unit`` to ``orig_unit``. """
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
else:
return np.rad2deg(value)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
from collections import OrderedDict
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import (Fittable1DModel, Fittable2DModel,
ModelDefinitionError)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D', 'Linear1D',
'Lorentz1D', 'MexicanHat1D', 'MexicanHat2D', 'RedshiftScaleFactor',
'Multiply', 'Planar2D', 'Scale', 'Sersic1D', 'Sersic2D', 'Shift',
'Sine1D', 'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D', 'Voigt1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
mean : float
Mean of the Gaussian.
stddev : float
Standard deviation of the Gaussian.
Notes
-----
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1)
mean = Parameter(default=0)
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
else:
return {'x': self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('mean', inputs_unit['x']),
('stddev', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
x_mean : float
Mean of the Gaussian in x.
y_mean : float
Mean of the Gaussian in y.
x_stddev : float or None
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or None
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float, optional
Rotation angle in radians. The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1)
x_mean = Parameter(default=0)
y_mean = Parameter(default=0)
x_stddev = Parameter(default=1)
y_stddev = Parameter(default=1)
theta = Parameter(default=0.0)
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
else:
# Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
# TODO: Maybe it should be possible for the covariance matrix
# to be some (x, y, ..., z, 2, 2) array to be broadcast with
# other parameters of shape (x, y, ..., z)
# But that's maybe a special case to work out if/when needed
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
else:
return {'x': self.x_mean.unit,
'y': self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_mean', inputs_unit['x']),
('y_mean', inputs_unit['x']),
('x_stddev', inputs_unit['x']),
('y_stddev', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
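Examples
--------
An illustrative round trip through the model and its inverse:
>>> from astropy.modeling.models import Shift
>>> s = Shift(offset=2.)
>>> s(1.)
3.0
>>> s.inverse(s(1.))
1.0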
"""
offset = Parameter(default=0)
linear = True
@property
def input_units(self):
if self.offset.unit is None:
return None
else:
return {'x': self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('offset', outputs_unit['y'])])
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1)
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
if self.factor.unit is None:
return None
else:
return {'x': self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
unit = outputs_unit['y'] / inputs_unit['x']
if unit == u.one:
unit = None
return OrderedDict([('factor', unit)])
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
inputs = ('x',)
outputs = ('y',)
factor = Parameter(default=1)
linear = True
fittable = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('factor', outputs_unit['y'])])
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
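Examples
--------
As an illustration, a redshift of 0.5 scales a coordinate by 1.5:
>>> from astropy.modeling.models import RedshiftScaleFactor
>>> RedshiftScaleFactor(z=0.5)(1.)
1.5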
"""
z = Parameter(description='redshift', default=0)
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ImportError:
raise ImportError('Sersic1D model requires scipy > 0.11.')
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
else:
return {'x': self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('r_eff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1)
frequency = Parameter(default=1)
phase = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
else:
return {'x': 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('frequency', inputs_unit['x'] ** -1),
('amplitude', outputs_unit['y'])])
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
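Examples
--------
An illustrative evaluation and inversion of a line:
>>> from astropy.modeling.models import Linear1D
>>> line = Linear1D(slope=2, intercept=1)
>>> line(3)
7.0
>>> line.inverse(7.0)
3.0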
"""
slope = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, slope, intercept):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
else:
return {'x': self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['y']),
('slope', outputs_unit['y'] / inputs_unit['x'])])
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the straight line in X
slope_y : float
Slope of the straight line in Y
intercept : float
Z-intercept of the straight line
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1)
slope_y = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['z']),
('slope_x', outputs_unit['z'] / inputs_unit['x']),
('slope_y', outputs_unit['z'] / inputs_unit['y'])])
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
See Also
--------
Gaussian1D, Box1D, MexicanHat1D
Notes
-----
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
fwhm = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float
Position of the peak
amplitude_L : float
The Lorentzian amplitude
fwhm_L : float
The Lorentzian full width at half maximum
fwhm_G : float
The Gaussian full width at half maximum
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Algorithm for the computation taken from
McLean, A. B., Mitchell, C. E. J. & Swanston, D. M. Implementation of an
efficient analytical approximation to the Voigt function for photoemission
lineshape analysis. Journal of Electron Spectroscopy and Related Phenomena
69, 125-132 (1994)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0)
amplitude_L = Parameter(default=1)
fwhm_L = Parameter(default=2/np.pi)
fwhm_G = Parameter(default=np.log(2))
_abcd = np.array([
[-1.2150, -1.3509, -1.2150, -1.3509], # A
[1.2359, 0.3786, -1.2359, -0.3786], # B
[-0.3085, 0.5906, -0.3085, 0.5906], # C
[0.0210, -1.1858, -0.0210, 1.1858]]) # D
@classmethod
def evaluate(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[..., np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[..., np.newaxis]
V = np.sum((C * (Y - A) + D * (X - B))/(((Y - A) ** 2 + (X - B) ** 2)), axis=-1)
return (fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G) * V
@classmethod
def fit_deriv(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[:, np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[:, np.newaxis]
constant = fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G
alpha = C * (Y - A) + D * (X - B)
beta = (Y - A) ** 2 + (X - B) ** 2
V = np.sum((alpha / beta), axis=-1)
dVdx = np.sum((D/beta - 2 * (X - B) * alpha / np.square(beta)), axis=-1)
dVdy = np.sum((C/beta - 2 * (Y - A) * alpha / np.square(beta)), axis=-1)
dyda = [-constant * dVdx * 2 * sqrt_ln2 / fwhm_G,
constant * V / amplitude_L,
constant * (V / fwhm_L + dVdy * sqrt_ln2 / fwhm_G),
-constant * (V + (sqrt_ln2 / fwhm_G) * (2 * (x - x_0) * dVdx + fwhm_L * dVdy)) / fwhm_G]
return dyda
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm_L', inputs_unit['x']),
('fwhm_G', inputs_unit['x']),
('amplitude_L', outputs_unit['y'])])
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['y'])])
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['z'])])
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
a = Parameter(default=1)
b = Parameter(default=1)
theta = Parameter(default=0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('a', inputs_unit['x']),
('b', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
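Examples
--------
Points inside ``R_0`` evaluate to the amplitude and points outside to
zero (illustrative):
>>> from astropy.modeling.models import Disk2D
>>> disk = Disk2D(amplitude=2, R_0=1)
>>> disk(0, 0), disk(2, 0)
(2.0, 0.0)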
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
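Examples
--------
``r_out`` may be given instead of ``width``; the model stores the
equivalent width (illustrative):
>>> from astropy.modeling.models import Ring2D
>>> ring = Ring2D(r_in=2, r_out=5)
>>> ring.width.value
3.0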
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_in', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Delta1D(Fittable1DModel):
"""One dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Delta2D(Fittable2DModel):
"""Two dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
result = np.select([inside], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
x_width = Parameter(default=1)
y_width = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['y']),
('x_width', inputs_unit['x']),
('y_width', inputs_unit['y']),
('amplitude', outputs_unit['z'])])
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('slope', outputs_unit['y'] / inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
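
    Examples
    --------
    A minimal rendering sketch; the parameter values below are arbitrary
    and chosen only for illustration:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import TrapezoidDisk2D

        x, y = np.meshgrid(np.linspace(-4, 4, 200), np.linspace(-4, 4, 200))
        mod = TrapezoidDisk2D(amplitude=1, x_0=0, y_0=0, R_0=1, slope=0.5)

        plt.figure()
        plt.imshow(mod(x, y), origin='lower', extent=(-4, 4, -4, 4))
        plt.colorbar()
        plt.show()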
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('slope', outputs_unit['z'] / inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class MexicanHat1D(Fittable1DModel):
"""
One dimensional Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import MexicanHat1D
plt.figure()
s1 = MexicanHat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
        s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Mexican Hat model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class MexicanHat2D(Fittable2DModel):
"""
Two dimensional symmetric Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
        + \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
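
    Examples
    --------
    A minimal rendering sketch; the parameter values below are arbitrary
    and chosen only for illustration:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import MexicanHat2D

        x, y = np.meshgrid(np.linspace(-4, 4, 200), np.linspace(-4, 4, 200))
        mod = MexicanHat2D(amplitude=1, x_0=0, y_0=0, sigma=1)

        plt.figure()
        plt.imshow(mod(x, y), origin='lower', extent=(-4, 4, -4, 4))
        plt.colorbar()
        plt.show()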
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Mexican Hat model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
    1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
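
    Examples
    --------
    A minimal rendering sketch (requires scipy); the parameter values
    below are arbitrary and chosen only for illustration:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import AiryDisk2D

        x, y = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
        mod = AiryDisk2D(amplitude=1, x_0=0, y_0=0, radius=2)

        plt.figure()
        # A small floor keeps log10 finite near the zeros of the pattern.
        plt.imshow(np.log10(mod(x, y) + 1e-8), origin='lower',
                   extent=(-5, 5, -5, 5))
        plt.colorbar()
        plt.show()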
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
radius = Parameter(default=1)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
            # If scipy is missing entirely the import raises ImportError;
            # some very old scipy versions raised ValueError here instead.
            except (ImportError, ValueError):
                raise ImportError('AiryDisk2D model requires scipy > 0.11.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('radius', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
        s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
fac = (1 + (x - x_0) ** 2 / gamma ** 2)
d_A = fac ** (-alpha)
d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2))
d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A /
(fac * gamma ** 3))
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
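
    Examples
    --------
    A minimal rendering sketch; the parameter values below are arbitrary
    and chosen only for illustration:

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import Moffat2D

        x, y = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
        mod = Moffat2D(amplitude=1, x_0=0, y_0=0, gamma=1, alpha=2)

        plt.figure()
        plt.imshow(mod(x, y), origin='lower', extent=(-5, 5, -5, 5))
        plt.colorbar()
        plt.show()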
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = (2 * amplitude * alpha * d_A * rr_gg /
(gamma ** 3 * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
        x, y = np.meshgrid(np.arange(100), np.arange(100))
        mod = Sersic2D(amplitude=1, r_eff=25, n=4, x_0=50, y_0=50,
                       ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
ellip = Parameter(default=0)
theta = Parameter(default=0)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            # If scipy is missing entirely the import raises ImportError;
            # some very old scipy versions raised ValueError here instead.
            except (ImportError, ValueError):
                raise ImportError('Sersic2D model requires scipy > 0.11.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_eff', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
|
f7bb103fbaacfb648d7ba82b0b426121868318e625152b46e50c76da2cd16a23 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees but internally the computations
are performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
import numpy as np
from .core import Model
from .parameters import Parameter, InputParameterError
from astropy import units as u
from . import _projections
from .utils import _to_radian, _to_orig_unit
projcodes = [
'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
]
__all__ = ['Projection', 'Pix2SkyProjection', 'Sky2PixProjection',
'Zenithal', 'Cylindrical', 'PseudoCylindrical', 'Conic',
'PseudoConic', 'QuadCube', 'HEALPix',
'AffineTransformation2D',
'projcodes',
'Pix2Sky_ZenithalPerspective', 'Sky2Pix_ZenithalPerspective',
'Pix2Sky_SlantZenithalPerspective', 'Sky2Pix_SlantZenithalPerspective',
'Pix2Sky_Gnomonic', 'Sky2Pix_Gnomonic',
'Pix2Sky_Stereographic', 'Sky2Pix_Stereographic',
'Pix2Sky_SlantOrthographic', 'Sky2Pix_SlantOrthographic',
'Pix2Sky_ZenithalEquidistant', 'Sky2Pix_ZenithalEquidistant',
'Pix2Sky_ZenithalEqualArea', 'Sky2Pix_ZenithalEqualArea',
'Pix2Sky_Airy', 'Sky2Pix_Airy',
'Pix2Sky_CylindricalPerspective', 'Sky2Pix_CylindricalPerspective',
'Pix2Sky_CylindricalEqualArea', 'Sky2Pix_CylindricalEqualArea',
'Pix2Sky_PlateCarree', 'Sky2Pix_PlateCarree',
'Pix2Sky_Mercator', 'Sky2Pix_Mercator',
'Pix2Sky_SansonFlamsteed', 'Sky2Pix_SansonFlamsteed',
'Pix2Sky_Parabolic', 'Sky2Pix_Parabolic',
'Pix2Sky_Molleweide', 'Sky2Pix_Molleweide',
'Pix2Sky_HammerAitoff', 'Sky2Pix_HammerAitoff',
'Pix2Sky_ConicPerspective', 'Sky2Pix_ConicPerspective',
'Pix2Sky_ConicEqualArea', 'Sky2Pix_ConicEqualArea',
'Pix2Sky_ConicEquidistant', 'Sky2Pix_ConicEquidistant',
'Pix2Sky_ConicOrthomorphic', 'Sky2Pix_ConicOrthomorphic',
'Pix2Sky_BonneEqualArea', 'Sky2Pix_BonneEqualArea',
'Pix2Sky_Polyconic', 'Sky2Pix_Polyconic',
'Pix2Sky_TangentialSphericalCube', 'Sky2Pix_TangentialSphericalCube',
'Pix2Sky_COBEQuadSphericalCube', 'Sky2Pix_COBEQuadSphericalCube',
'Pix2Sky_QuadSphericalCube', 'Sky2Pix_QuadSphericalCube',
'Pix2Sky_HEALPix', 'Sky2Pix_HEALPix',
'Pix2Sky_HEALPixPolar', 'Sky2Pix_HEALPixPolar',
# The following are short FITS WCS aliases
'Pix2Sky_AZP', 'Sky2Pix_AZP',
'Pix2Sky_SZP', 'Sky2Pix_SZP',
'Pix2Sky_TAN', 'Sky2Pix_TAN',
'Pix2Sky_STG', 'Sky2Pix_STG',
'Pix2Sky_SIN', 'Sky2Pix_SIN',
'Pix2Sky_ARC', 'Sky2Pix_ARC',
'Pix2Sky_ZEA', 'Sky2Pix_ZEA',
'Pix2Sky_AIR', 'Sky2Pix_AIR',
'Pix2Sky_CYP', 'Sky2Pix_CYP',
'Pix2Sky_CEA', 'Sky2Pix_CEA',
'Pix2Sky_CAR', 'Sky2Pix_CAR',
'Pix2Sky_MER', 'Sky2Pix_MER',
'Pix2Sky_SFL', 'Sky2Pix_SFL',
'Pix2Sky_PAR', 'Sky2Pix_PAR',
'Pix2Sky_MOL', 'Sky2Pix_MOL',
'Pix2Sky_AIT', 'Sky2Pix_AIT',
'Pix2Sky_COP', 'Sky2Pix_COP',
'Pix2Sky_COE', 'Sky2Pix_COE',
'Pix2Sky_COD', 'Sky2Pix_COD',
'Pix2Sky_COO', 'Sky2Pix_COO',
'Pix2Sky_BON', 'Sky2Pix_BON',
'Pix2Sky_PCO', 'Sky2Pix_PCO',
'Pix2Sky_TSC', 'Sky2Pix_TSC',
'Pix2Sky_CSC', 'Sky2Pix_CSC',
'Pix2Sky_QSC', 'Sky2Pix_QSC',
'Pix2Sky_HPX', 'Sky2Pix_HPX',
'Pix2Sky_XPH', 'Sky2Pix_XPH'
]
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
inputs = ('x', 'y')
outputs = ('phi', 'theta')
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
return {'x': u.deg, 'y': u.deg}
@property
def return_units(self):
return {'phi': u.deg, 'theta': u.deg}
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
inputs = ('phi', 'theta')
outputs = ('x', 'y')
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
return {'phi': u.deg, 'theta': u.deg}
@property
def return_units(self):
return {'x': u.deg, 'y': u.deg}
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
_separable = False
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
    ----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
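
    Examples
    --------
    A minimal round-trip sketch; the parameter and input values are
    arbitrary and serve only to show that the model and its ``inverse``
    are consistent::

        from astropy.modeling.models import Pix2Sky_AZP

        azp = Pix2Sky_AZP(mu=2, gamma=30)
        phi, theta = azp(1.0, 2.0)        # pixel plane -> native spherical
        x, y = azp.inverse(phi, theta)    # recovers (1.0, 2.0) up to rounding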
"""
mu = Parameter(default=0.0)
gamma = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, mu=mu.default, gamma=gamma.default, **kwargs):
# units : mu - in spherical radii, gamma - in deg
# TODO: Support quantity objects here and in similar contexts
super().__init__(mu, gamma, **kwargs)
@mu.validator
def mu(self, value):
if np.any(value == -1):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
@property
def inverse(self):
return Sky2Pix_ZenithalPerspective(self.mu.value, self.gamma.value)
@classmethod
def evaluate(cls, x, y, mu, gamma):
return _projections.azpx2s(x, y, mu, _to_orig_unit(gamma))
Pix2Sky_AZP = Pix2Sky_ZenithalPerspective
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = Parameter(default=0.0)
gamma = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
@mu.validator
def mu(self, value):
if np.any(value == -1):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
@property
def inverse(self):
return Pix2Sky_AZP(self.mu.value, self.gamma.value)
@classmethod
def evaluate(cls, phi, theta, mu, gamma):
return _projections.azps2x(
phi, theta, mu, _to_orig_unit(gamma))
Sky2Pix_AZP = Sky2Pix_ZenithalPerspective
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
    ----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
def _validate_mu(mu):
if np.asarray(mu == -1).any():
raise ValueError(
"Zenithal perspective projection is not defined for mu=-1")
return mu
mu = Parameter(default=0.0, setter=_validate_mu)
phi0 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
theta0 = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
return Sky2Pix_SlantZenithalPerspective(
self.mu.value, self.phi0.value, self.theta0.value)
@classmethod
def evaluate(cls, x, y, mu, phi0, theta0):
return _projections.szpx2s(
x, y, mu, _to_orig_unit(phi0), _to_orig_unit(theta0))
Pix2Sky_SZP = Pix2Sky_SlantZenithalPerspective
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
    Slant zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
        Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
def _validate_mu(mu):
if np.asarray(mu == -1).any():
raise ValueError("Zenithal perspective projection is not defined for mu=-1")
return mu
mu = Parameter(default=0.0, setter=_validate_mu)
phi0 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
    theta0 = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
return Pix2Sky_SlantZenithalPerspective(
self.mu.value, self.phi0.value, self.theta0.value)
@classmethod
def evaluate(cls, phi, theta, mu, phi0, theta0):
return _projections.szps2x(
phi, theta, mu, _to_orig_unit(phi0), _to_orig_unit(theta0))
Sky2Pix_SZP = Sky2Pix_SlantZenithalPerspective
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
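
    Examples
    --------
    A minimal round-trip sketch; the input values are arbitrary and serve
    only to show that the model and its ``inverse`` are consistent::

        from astropy.modeling.models import Pix2Sky_TAN

        tan = Pix2Sky_TAN()
        phi, theta = tan(3.0, 4.0)        # degrees in, degrees out
        x, y = tan.inverse(phi, theta)    # recovers (3.0, 4.0) up to rounding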
"""
@property
def inverse(self):
return Sky2Pix_Gnomonic()
@classmethod
def evaluate(cls, x, y):
return _projections.tanx2s(x, y)
Pix2Sky_TAN = Pix2Sky_Gnomonic
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
@property
def inverse(self):
return Pix2Sky_Gnomonic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.tans2x(phi, theta)
Sky2Pix_TAN = Sky2Pix_Gnomonic
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
@property
def inverse(self):
return Sky2Pix_Stereographic()
@classmethod
def evaluate(cls, x, y):
return _projections.stgx2s(x, y)
Pix2Sky_STG = Pix2Sky_Stereographic
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
@property
def inverse(self):
return Pix2Sky_Stereographic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.stgs2x(phi, theta)
Sky2Pix_STG = Sky2Pix_Stereographic
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = Parameter(default=0.0)
eta = Parameter(default=0.0)
@property
def inverse(self):
return Sky2Pix_SlantOrthographic(self.xi.value, self.eta.value)
@classmethod
def evaluate(cls, x, y, xi, eta):
return _projections.sinx2s(x, y, xi, eta)
Pix2Sky_SIN = Pix2Sky_SlantOrthographic
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
    More generally, with nonzero :math:`\xi` and :math:`\eta`, the full
    transformation is:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = Parameter(default=0.0)
eta = Parameter(default=0.0)
@property
def inverse(self):
return Pix2Sky_SlantOrthographic(self.xi.value, self.eta.value)
@classmethod
def evaluate(cls, phi, theta, xi, eta):
return _projections.sins2x(phi, theta, xi, eta)
Sky2Pix_SIN = Sky2Pix_SlantOrthographic
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
@property
def inverse(self):
return Sky2Pix_ZenithalEquidistant()
@classmethod
def evaluate(cls, x, y):
return _projections.arcx2s(x, y)
Pix2Sky_ARC = Pix2Sky_ZenithalEquidistant
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
@property
def inverse(self):
return Pix2Sky_ZenithalEquidistant()
@classmethod
def evaluate(cls, phi, theta):
return _projections.arcs2x(phi, theta)
Sky2Pix_ARC = Sky2Pix_ZenithalEquidistant
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
    Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
@property
def inverse(self):
return Sky2Pix_ZenithalEqualArea()
@classmethod
def evaluate(cls, x, y):
return _projections.zeax2s(x, y)
Pix2Sky_ZEA = Pix2Sky_ZenithalEqualArea
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
    Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
@property
def inverse(self):
return Pix2Sky_ZenithalEqualArea()
@classmethod
def evaluate(cls, phi, theta):
return _projections.zeas2x(phi, theta)
Sky2Pix_ZEA = Sky2Pix_ZenithalEqualArea
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = Parameter(default=90.0)
@property
def inverse(self):
return Sky2Pix_Airy(self.theta_b.value)
@classmethod
def evaluate(cls, x, y, theta_b):
return _projections.airx2s(x, y, theta_b)
Pix2Sky_AIR = Pix2Sky_Airy
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} + \frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = Parameter(default=90.0)
@property
def inverse(self):
return Pix2Sky_Airy(self.theta_b.value)
@classmethod
def evaluate(cls, phi, theta, theta_b):
return _projections.airs2x(phi, theta, theta_b)
Sky2Pix_AIR = Sky2Pix_Airy
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
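
    Examples
    --------
    A minimal round-trip sketch; the parameter and input values are
    arbitrary and serve only to show that the model and its ``inverse``
    are consistent::

        from astropy.modeling.models import Pix2Sky_CYP

        cyp = Pix2Sky_CYP(mu=1, lam=1)
        phi, theta = cyp(1.0, 2.0)        # degrees in, degrees out
        x, y = cyp.inverse(phi, theta)    # recovers (1.0, 2.0) up to rounding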
"""
mu = Parameter(default=1.0)
lam = Parameter(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
@property
def inverse(self):
return Sky2Pix_CylindricalPerspective(self.mu.value, self.lam.value)
@classmethod
def evaluate(cls, x, y, mu, lam):
return _projections.cypx2s(x, y, mu, lam)
Pix2Sky_CYP = Pix2Sky_CylindricalPerspective
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = Parameter(default=1.0)
lam = Parameter(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
@property
def inverse(self):
        return Pix2Sky_CylindricalPerspective(self.mu.value, self.lam.value)
@classmethod
def evaluate(cls, phi, theta, mu, lam):
return _projections.cyps2x(phi, theta, mu, lam)
Sky2Pix_CYP = Sky2Pix_CylindricalPerspective
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = Parameter(default=1)
@property
def inverse(self):
        return Sky2Pix_CylindricalEqualArea(self.lam.value)
@classmethod
def evaluate(cls, x, y, lam):
return _projections.ceax2s(x, y, lam)
Pix2Sky_CEA = Pix2Sky_CylindricalEqualArea
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = Parameter(default=1)
@property
def inverse(self):
        return Pix2Sky_CylindricalEqualArea(self.lam.value)
@classmethod
def evaluate(cls, phi, theta, lam):
return _projections.ceas2x(phi, theta, lam)
Sky2Pix_CEA = Sky2Pix_CylindricalEqualArea
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@property
def inverse(self):
return Sky2Pix_PlateCarree()
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x, copy=True)
theta = np.array(y, copy=True)
return phi, theta
Pix2Sky_CAR = Pix2Sky_PlateCarree
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@property
def inverse(self):
return Pix2Sky_PlateCarree()
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi, copy=True)
y = np.array(theta, copy=True)
return x, y
Sky2Pix_CAR = Sky2Pix_PlateCarree
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
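
    Examples
    --------
    A minimal round-trip sketch; the input values are arbitrary and serve
    only to show that the model and its ``inverse`` are consistent::

        from astropy.modeling.models import Pix2Sky_MER

        mer = Pix2Sky_MER()
        phi, theta = mer(15.0, 30.0)      # degrees in, degrees out
        x, y = mer.inverse(phi, theta)    # recovers (15.0, 30.0) up to rounding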
"""
@property
def inverse(self):
return Sky2Pix_Mercator()
@classmethod
def evaluate(cls, x, y):
return _projections.merx2s(x, y)
Pix2Sky_MER = Pix2Sky_Mercator
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
@property
def inverse(self):
return Pix2Sky_Mercator()
@classmethod
def evaluate(cls, phi, theta):
return _projections.mers2x(phi, theta)
Sky2Pix_MER = Sky2Pix_Mercator
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
@property
def inverse(self):
return Sky2Pix_SansonFlamsteed()
@classmethod
def evaluate(cls, x, y):
return _projections.sflx2s(x, y)
Pix2Sky_SFL = Pix2Sky_SansonFlamsteed
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
@property
def inverse(self):
return Pix2Sky_SansonFlamsteed()
@classmethod
def evaluate(cls, phi, theta):
return _projections.sfls2x(phi, theta)
Sky2Pix_SFL = Sky2Pix_SansonFlamsteed
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
\phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Parabolic()
@classmethod
def evaluate(cls, x, y):
return _projections.parx2s(x, y)
Pix2Sky_PAR = Pix2Sky_Parabolic
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Parabolic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.pars2x(phi, theta)
Sky2Pix_PAR = Sky2Pix_Parabolic
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right) + \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Molleweide()
@classmethod
def evaluate(cls, x, y):
return _projections.molx2s(x, y)
Pix2Sky_MOL = Pix2Sky_Molleweide
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Molleweide()
@classmethod
def evaluate(cls, phi, theta):
return _projections.mols2x(phi, theta)
Sky2Pix_MOL = Sky2Pix_Molleweide
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_HammerAitoff()
@classmethod
def evaluate(cls, x, y):
return _projections.aitx2s(x, y)
Pix2Sky_AIT = Pix2Sky_HammerAitoff
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_HammerAitoff()
@classmethod
def evaluate(cls, phi, theta):
return _projections.aits2x(phi, theta)
Sky2Pix_AIT = Sky2Pix_HammerAitoff
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = False
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
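
    Examples
    --------
    A minimal round-trip sketch; the parameter and input values are
    arbitrary and serve only to show that the model and its ``inverse``
    are consistent::

        from astropy.modeling.models import Pix2Sky_COP

        cop = Pix2Sky_COP(sigma=60, delta=15)
        phi, theta = cop(1.0, -1.0)       # degrees in, degrees out
        x, y = cop.inverse(phi, theta)    # recovers (1.0, -1.0) up to rounding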
"""
@property
def inverse(self):
return Sky2Pix_ConicPerspective(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.copx2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COP = Pix2Sky_ConicPerspective
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicPerspective(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.cops2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COP = Sky2Pix_ConicPerspective
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
    Albers' conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicEqualArea(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.coex2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COE = Pix2Sky_ConicEqualArea
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
    Albers' conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicEqualArea(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.coes2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COE = Sky2Pix_ConicEqualArea
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicEquidistant(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.codx2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COD = Pix2Sky_ConicEquidistant
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicEquidistant(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.cods2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COD = Sky2Pix_ConicEquidistant
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicOrthomorphic(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.coox2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COO = Pix2Sky_ConicOrthomorphic
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicOrthomorphic(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.coos2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COO = Sky2Pix_ConicOrthomorphic
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
theta1 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = True
@property
def inverse(self):
return Sky2Pix_BonneEqualArea(self.theta1.value)
@classmethod
def evaluate(cls, x, y, theta1):
return _projections.bonx2s(x, y, _to_orig_unit(theta1))
Pix2Sky_BON = Pix2Sky_BonneEqualArea
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
theta1 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = True
@property
def inverse(self):
return Pix2Sky_BonneEqualArea(self.theta1.value)
@classmethod
def evaluate(cls, phi, theta, theta1):
return _projections.bons2x(phi, theta,
_to_orig_unit(theta1))
Sky2Pix_BON = Sky2Pix_BonneEqualArea
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Polyconic()
@classmethod
def evaluate(cls, x, y):
return _projections.pcox2s(x, y)
Pix2Sky_PCO = Pix2Sky_Polyconic
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Polyconic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.pcos2x(phi, theta)
Sky2Pix_PCO = Sky2Pix_Polyconic
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_TangentialSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.tscx2s(x, y)
Pix2Sky_TSC = Pix2Sky_TangentialSphericalCube
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
    Corresponds to the ``TSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_TangentialSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.tscs2x(phi, theta)
Sky2Pix_TSC = Sky2Pix_TangentialSphericalCube
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_COBEQuadSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.cscx2s(x, y)
Pix2Sky_CSC = Pix2Sky_COBEQuadSphericalCube
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_COBEQuadSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.cscs2x(phi, theta)
Sky2Pix_CSC = Sky2Pix_COBEQuadSphericalCube
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_QuadSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.qscx2s(x, y)
Pix2Sky_QSC = Pix2Sky_QuadSphericalCube
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_QuadSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.qscs2x(phi, theta)
Sky2Pix_QSC = Sky2Pix_QuadSphericalCube
class HEALPix(Projection):
r"""Base class for HEALPix projections.
"""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = Parameter(default=4.0)
X = Parameter(default=3.0)
@property
def inverse(self):
return Sky2Pix_HEALPix(self.H.value, self.X.value)
@classmethod
def evaluate(cls, x, y, H, X):
return _projections.hpxx2s(x, y, H, X)
Pix2Sky_HPX = Pix2Sky_HEALPix
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = Parameter(default=4.0)
X = Parameter(default=3.0)
@property
def inverse(self):
return Pix2Sky_HEALPix(self.H.value, self.X.value)
@classmethod
def evaluate(cls, phi, theta, H, X):
return _projections.hpxs2x(phi, theta, H, X)
Sky2Pix_HPX = Sky2Pix_HEALPix
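# Illustrative sketch (not part of the upstream module): a HEALPix round trip
# with the model pair above. The numerical values come from the compiled
# ``_projections`` extension, so the doctest is skipped.
#
# >>> from astropy.modeling.models import Pix2Sky_HEALPix
# >>> pix2sky = Pix2Sky_HEALPix(H=4.0, X=3.0)
# >>> phi, theta = pix2sky(10.0, 20.0)    # doctest: +SKIP
# >>> pix2sky.inverse(phi, theta)         # doctest: +SKIP
# (10.0, 20.0)  # approximately, up to floating-point error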
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
        return Sky2Pix_HEALPixPolar()
@classmethod
def evaluate(cls, x, y):
return _projections.xphx2s(x, y)
Pix2Sky_XPH = Pix2Sky_HEALPixPolar
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
    HEALPix polar, aka "butterfly" projection - sky to pixel.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
        return Pix2Sky_HEALPixPolar()
@classmethod
def evaluate(cls, phi, theta):
        return _projections.xphs2x(phi, theta)
Sky2Pix_XPH = Sky2Pix_HEALPixPolar
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given as either a 2x1 or 1x2 array) specifying a
translation to apply to the inputs
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array")
@translation.validator
def translation(self, value):
"""
Validates that the translation vector is a 2D vector. This allows
either a "row" vector or a "column" vector where in the latter case the
resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
"""
if not ((np.ndim(value) == 1 and np.shape(value) == (2,)) or
(np.ndim(value) == 2 and np.shape(value) == (1, 2))):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
"Transformation matrix is singular; {0} model does not "
"have an inverse".format(self.__class__.__name__))
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
        Apply the transformation to a set of 2D Cartesian coordinates given as
        two lists--one for the x coordinates and one for the y coordinates--or
        a single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
inarr = np.vstack([x.flatten(), y.flatten(), np.ones(x.size)])
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
if not all([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
raise ValueError("To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities.")
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
else:
return augmented_matrix
@property
def input_units(self):
if self.translation.unit is None and self.matrix.unit is None:
return None
elif self.translation.unit is not None:
return {'x': self.translation.unit,
'y': self.translation.unit
}
else:
return {'x': self.matrix.unit,
'y': self.matrix.unit
}
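# Illustrative sketch (not part of the upstream module): composing the affine
# transform with its analytic ``inverse`` recovers the inputs. With
# matrix=[[2, 0], [0, 2]] and translation=[1, -1], the point (1, 1) maps to
# (2*1 + 1, 2*1 - 1) = (3, 1).
#
# >>> from astropy.modeling.models import AffineTransformation2D
# >>> aff = AffineTransformation2D(matrix=[[2, 0], [0, 2]],
# ...                              translation=[1, -1])
# >>> aff(1.0, 1.0)           # doctest: +SKIP
# (3.0, 1.0)
# >>> aff.inverse(3.0, 1.0)   # doctest: +SKIP
# (1.0, 1.0)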
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
from collections import OrderedDict
import numpy as np
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import poly_map_domain, comb
from astropy.utils import indent, check_broadcast
from astropy.units import Quantity
__all__ = [
'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D',
'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D',
'Polynomial2D', 'SIP', 'OrthoPolynomialBase',
'PolynomialModel'
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
def __getattr__(self, attr):
if self._param_names and attr in self._param_names:
return Parameter(attr, default=0.0, model=self)
raise AttributeError(attr)
def __setattr__(self, attr, value):
# TODO: Support a means of specifying default values for coefficients
# Check for self._ndim first--if it hasn't been defined then the
# instance hasn't been initialized yet and self.param_names probably
# won't work.
# This has to vaguely duplicate the functionality of
# Parameter.__set__.
# TODO: I wonder if there might be a way around that though...
if attr[0] != '_' and self._param_names and attr in self._param_names:
param = Parameter(attr, default=0.0, model=self)
# This is a little hackish, but we can actually reuse the
# Parameter.__set__ method here
param.__set__(self, value)
else:
super().__setattr__(attr, value)
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(self, degree, n_models=None, model_set_axis=None,
name=None, meta=None, **params):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name,
meta=meta, **params)
def __repr__(self):
return self._format_repr([self.degree])
def __str__(self):
return self._format_str([('Degree', self.degree)])
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
return numc
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append('c{0}'.format(n))
else:
for i in range(self.degree + 1):
names.append('c{0}_{1}'.format(i, 0))
for i in range(1, self.degree + 1):
names.append('c{0}_{1}'.format(0, i))
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append('c{0}_{1}'.format(i, j))
return tuple(names)
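# Illustrative sketch (not part of the upstream module): the naming scheme
# implemented by ``_generate_coeff_names`` above, shown for degree 2.
#
# >>> from astropy.modeling.models import Polynomial1D, Polynomial2D
# >>> Polynomial1D(2).param_names
# ('c0', 'c1', 'c2')
# >>> Polynomial2D(2).param_names
# ('c0_0', 'c1_0', 'c2_0', 'c0_1', 'c0_2', 'c1_1')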
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
x_window : list or None, optional
range of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
inputs = ('x', 'y')
outputs = ('z',)
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
# TODO: Perhaps some of these other parameters should be properties?
# TODO: An awful lot of the functionality in this method is still
# shared by PolynomialModel; perhaps some of it can be generalized in
# PolynomialBase
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
self.x_domain = x_domain
self.y_domain = y_domain
self.x_window = x_window
self.y_window = y_window
self._param_names = self._generate_coeff_names()
super().__init__(
n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def __repr__(self):
return self._format_repr([self.x_degree, self.y_degree])
def __str__(self):
return self._format_str(
[('X-Degree', self.x_degree),
('Y-Degree', self.y_degree)])
def get_num_coeff(self):
"""
Determine how many coefficients are needed
Returns
-------
numc : int
number of coefficients
"""
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = 'c{0}_{1}'.format(i, j)
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, 'r' + str(n), 0.)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, 'r' + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, 'r' + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, 'r' + str(i), 0.)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, 'r' + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append('c{0}_{1}'.format(i, j))
return tuple(names)
def _fcache(self, x, y):
# TODO: Write a docstring explaining the actual purpose of this method
"""To be implemented by subclasses"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, format_info = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), format_info
class Chebyshev1D(PolynomialModel):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
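# Illustrative sketch (not part of the upstream module): the Clenshaw recursion
# above agrees with direct evaluation of the series. For c0=1, c1=2, c2=3 at
# x=0.5: T0=1, T1=0.5, T2=2*0.5**2 - 1 = -0.5, so the sum is 1 + 1 - 1.5 = 0.5.
#
# >>> from astropy.modeling.models import Chebyshev1D
# >>> p = Chebyshev1D(2, c0=1, c1=2, c2=3)
# >>> float(p(0.5))
# 0.5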
class Hermite1D(PolynomialModel):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
"""
    inputs = ('x',)
    outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = 2 * x
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = 2 * x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = 2 * y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
        for n in range(x_terms + 2, x_terms + y_terms):
            # The Hermite degree of the y-term is n - x_terms, so the
            # recurrence factor must use the unshifted degree.
            kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - x_terms - 1) * kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Hermite polynomials:
.. math::
H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _hermderiv1d(self, x, deg):
"""
Derivative of 1D Hermite series
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x2
for i in range(2, deg + 1):
d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre1D(PolynomialModel):
r"""
Univariate Legendre series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_i(x)`` is the corresponding Legendre polynomial.
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
inputs = ('x',)
outputs = ('y',)
    _separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def clenshaw(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
nd = nd - 1
c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
return c0 + c1 * x
class Polynomial1D(PolynomialModel):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=[-1, 1], window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
@property
def input_units(self):
if self.degree == 0 or self.c1.unit is None:
return None
else:
return {'x': self.c0.unit / self.c1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = []
for i in range(self.degree + 1):
par = getattr(self, 'c{0}'.format(i))
mapping.append((par.name, outputs_unit['y'] / inputs_unit['x'] ** i))
return OrderedDict(mapping)
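# Illustrative sketch (not part of the upstream module): the Horner evaluation
# above computes c0 + x*(c1 + x*c2); for c0=1, c1=2, c2=3 at x=2 this is
# 1 + 2*(2 + 2*3) = 17.
#
# >>> from astropy.modeling.models import Polynomial1D
# >>> p = Polynomial1D(2, c0=1, c1=2, c2=3)
# >>> float(p(2))
# 17.0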
class Polynomial2D(PolynomialModel):
"""
2D Polynomial model.
Represents a general polynomial of degree n:
.. math::
P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
+ c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y
Parameters
----------
    degree : int
        Highest power of the polynomial; the number of coefficients
        is (degree + 1) * (degree + 2) / 2.
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
"""
inputs = ('x', 'y')
outputs = ('z',)
_separable = False
def __init__(self, degree, x_domain=[-1, 1], y_domain=[-1, 1],
x_window=[-1, 1], y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
self.x_domain = x_domain
self.y_domain = y_domain
self.x_window = x_window
self.y_window = y_window
def prepare_inputs(self, x, y, **kwargs):
inputs, format_info = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), format_info
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
result = self.multivariate_horner(x, y, invcoeff)
# Special case for degree==0 to ensure that the shape of the output is
# still as expected by the broadcasting rules, even though the x and y
# inputs are not used in the evaluation
if self.degree == 0:
output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
if output_shape:
new_result = np.empty(output_shape)
new_result[:] = result
result = new_result
return result
def fit_deriv(self, x, y, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.ndim == 2:
x = x.flatten()
if y.ndim == 2:
y = y.flatten()
if x.size != y.size:
raise ValueError('Expected x and y to be of equal size')
designx = x[:, None] ** np.arange(self.degree + 1)
designy = y[:, None] ** np.arange(1, self.degree + 1)
designmixed = []
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j <= self.degree:
designmixed.append((x ** i) * (y ** j))
designmixed = np.array(designmixed).T
if designmixed.any():
v = np.hstack([designx, designy, designmixed])
else:
v = np.hstack([designx, designy])
return v
def invlex_coeff(self, coeffs):
invlex_coeffs = []
lencoeff = range(self.degree + 1)
for i in lencoeff:
for j in lencoeff:
if i + j <= self.degree:
name = 'c{0}_{1}'.format(j, i)
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
Multivariate Horner's scheme
Parameters
----------
x, y : array
coeffs : array of coefficients in inverse lexical order
"""
alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
karr = np.diff(alpha, axis=0)
for n in range(len(karr)):
if karr[n, 1] != 0:
r2 = y * (r0 + r1 + r2)
r1 = np.zeros_like(coeffs[0], subok=False)
else:
r1 = x * (r0 + r1)
r0 = coeffs[n + 1]
return r0 + r1 + r2
@property
def input_units(self):
if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None):
return None
else:
return {'x': self.c0_0.unit / self.c1_0.unit,
'y': self.c0_0.unit / self.c0_1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = []
for i in range(self.degree + 1):
for j in range(self.degree + 1):
if i + j > 2:
continue
par = getattr(self, 'c{0}_{1}'.format(i, j))
mapping.append((par.name, outputs_unit['z'] / inputs_unit['x'] ** i / inputs_unit['y'] ** j))
return OrderedDict(mapping)
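# Illustrative sketch (not part of the upstream module): for degree 1 the model
# above reduces to c0_0 + c1_0*x + c0_1*y, evaluated by the multivariate Horner
# scheme.
#
# >>> from astropy.modeling.models import Polynomial2D
# >>> p = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
# >>> float(p(1, 1))
# 6.0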
class Chebyshev2D(OrthoPolynomialBase):
r"""
    Bivariate Chebyshev series.
    It is defined as
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x) T_m(y)
where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
the coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
specified with units, 2x^2 and -1 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Chebyshev functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Chebyshev polynomials:
.. math::
T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _chebderiv1d(self, x, deg):
"""
Derivative of 1D Chebyshev series
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x
for i in range(2, deg + 1):
d[i] = d[i - 1] * x2 - d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre2D(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
Model formula:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_{i}`` is the corresponding Legendre polynomial.
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Legendre functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = (((2 * (n - 1) + 1) * x * kfunc[n - 1] -
(n - 1) * kfunc[n - 2]) / n)
        for n in range(2, y_terms):
            kfunc[n + x_terms] = ((2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] -
                                  (n - 1) * kfunc[n + x_terms - 2]) / n
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
        This is an array with Legendre polynomials:
        .. math::
            L_{x_0}L_{y_0}, L_{x_1}L_{y_0}...L_{x_n}L_{y_0}...L_{x_n}L_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _legendderiv1d(self, x, deg):
"""Derivative of 1D Legendre polynomial"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
d[1] = x
for i in range(2, deg + 1):
d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
return np.rollaxis(d, 0, d.ndim)
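# Illustrative sketch (not part of the upstream module), assuming the 2D Horner
# implementation above matches the series definition: a first-degree Legendre2D
# with c0_0=1, c1_0=2, c0_1=3, c1_1=4 evaluates to
# 1 + 2*L1(x) + 3*L1(y) + 4*L1(x)*L1(y), i.e. 4.5 at (x, y) = (0.5, 0.5).
#
# >>> from astropy.modeling.models import Legendre2D
# >>> m = Legendre2D(1, 1, c0_0=1, c1_0=2, c0_1=3, c1_1=4)
# >>> float(m(0.5, 0.5))
# 4.5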
class _SIP1D(PolynomialBase):
"""
This implements the Simple Imaging Polynomial Model (SIP) in 1D.
It's unlikely it will be used in 1D so this class is private
and SIP should be used instead.
"""
inputs = ('u', 'v')
outputs = ('w',)
_separable = False
def __init__(self, order, coeff_prefix, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.order = order
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def __repr__(self):
return self._format_repr(args=[self.order, self.coeff_prefix])
def __str__(self):
return self._format_str(
[('Order', self.order),
('Coeff. Prefix', self.coeff_prefix)])
def evaluate(self, x, y, *coeffs):
# TODO: Rewrite this so that it uses a simpler method of determining
# the matrix based on the number of given coefficients.
mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
return self._eval_sip(x, y, mcoef)
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one param set
"""
if self.order < 2 or self.order > 9:
raise ValueError("Degree of polynomial must be 2< deg < 9")
nmixed = comb(self.order, ndim)
# remove 3 terms because SIP deg >= 2
numc = self.order * ndim + nmixed - 2
return numc
def _generate_coeff_names(self, coeff_prefix):
names = []
for i in range(2, self.order + 1):
names.append('{0}_{1}_{2}'.format(coeff_prefix, i, 0))
for i in range(2, self.order + 1):
names.append('{0}_{1}_{2}'.format(coeff_prefix, 0, i))
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
names.append('{0}_{1}_{2}'.format(coeff_prefix, i, j))
return names
def _coeff_matrix(self, coeff_prefix, coeffs):
mat = np.zeros((self.order + 1, self.order + 1))
for i in range(2, self.order + 1):
attr = '{0}_{1}_{2}'.format(coeff_prefix, i, 0)
mat[i, 0] = coeffs[self.param_names.index(attr)]
for i in range(2, self.order + 1):
attr = '{0}_{1}_{2}'.format(coeff_prefix, 0, i)
mat[0, i] = coeffs[self.param_names.index(attr)]
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
attr = '{0}_{1}_{2}'.format(coeff_prefix, i, j)
mat[i, j] = coeffs[self.param_names.index(attr)]
return mat
def _eval_sip(self, x, y, coef):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if self.coeff_prefix == 'A':
result = np.zeros(x.shape)
else:
result = np.zeros(y.shape)
for i in range(coef.shape[0]):
for j in range(coef.shape[1]):
if 1 < i + j < self.order + 1:
result = result + coef[i, j] * x ** i * y ** j
return result
class SIP(Model):
"""
Simple Imaging Polynomial (SIP) model.
The SIP convention is used to represent distortions in FITS image headers.
See [1]_ for a description of the SIP convention.
Parameters
----------
crpix : list or ndarray of length(2)
CRPIX values
a_order : int
SIP polynomial order for first axis
b_order : int
SIP order for second axis
a_coeff : dict
SIP coefficients for first axis
b_coeff : dict
SIP coefficients for the second axis
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
References
----------
.. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005 <http://adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
"""
inputs = ('u', 'v')
outputs = ('x', 'y')
_separable = False
def __init__(self, crpix, a_order, b_order, a_coeff={}, b_coeff={},
ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={},
n_models=None, model_set_axis=None, name=None, meta=None):
self._crpix = crpix
self._a_order = a_order
self._b_order = b_order
self._a_coeff = a_coeff
self._b_coeff = b_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
self.shift_a = Shift(-crpix[0])
self.shift_b = Shift(-crpix[1])
self.sip1d_a = _SIP1D(a_order, coeff_prefix='A', n_models=n_models,
model_set_axis=model_set_axis, **a_coeff)
self.sip1d_b = _SIP1D(b_order, coeff_prefix='B', n_models=n_models,
model_set_axis=model_set_axis, **b_coeff)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta)
def __repr__(self):
return '<{0}({1!r})>'.format(self.__class__.__name__,
[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b])
def __str__(self):
parts = ['Model: {0}'.format(self.__class__.__name__)]
for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
parts.append(indent(str(model), width=4))
parts.append('')
return '\n'.join(parts)
@property
def inverse(self):
if (self._ap_order is not None and self._bp_order is not None):
return InverseSIP(self._ap_order, self._bp_order,
self._ap_coeff, self._bp_coeff)
else:
raise NotImplementedError("SIP inverse coefficients are not available.")
def evaluate(self, x, y):
u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
return f, g
class InverseSIP(Model):
"""
Inverse Simple Imaging Polynomial
Parameters
----------
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
"""
inputs = ('x', 'y')
outputs = ('u', 'v')
_separable = False
def __init__(self, ap_order, bp_order, ap_coeff={}, bp_coeff={},
n_models=None, model_set_axis=None, name=None, meta=None):
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
# define the 0th term in order to use Polynomial2D
ap_coeff.setdefault('AP_0_0', 0)
bp_coeff.setdefault('BP_0_0', 0)
ap_coeff_params = dict((k.replace('AP_', 'c'), v)
for k, v in ap_coeff.items())
bp_coeff_params = dict((k.replace('BP_', 'c'), v)
for k, v in bp_coeff.items())
self.sip1d_ap = Polynomial2D(degree=ap_order,
model_set_axis=model_set_axis,
**ap_coeff_params)
self.sip1d_bp = Polynomial2D(degree=bp_order,
model_set_axis=model_set_axis,
**bp_coeff_params)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta)
def __repr__(self):
return '<{0}({1!r})>'.format(self.__class__.__name__,
[self.sip1d_ap, self.sip1d_bp])
def __str__(self):
parts = ['Model: {0}'.format(self.__class__.__name__)]
for model in [self.sip1d_ap, self.sip1d_bp]:
parts.append(indent(str(model), width=4))
parts.append('')
return '\n'.join(parts)
def evaluate(self, x, y):
x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
return x1, y1
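# Illustrative sketch (not part of the upstream module): with a single nonzero
# forward coefficient the SIP distortion terms can be checked by hand. For
# A_2_0 = 5e-6 and CRPIX = (1024, 1024), the input (1034, 1024) gives
# u = 10, v = 0, hence f = A_2_0 * u**2 = 5e-4 and g = B_0_2 * v**2 = 0.
#
# >>> from astropy.modeling.models import SIP
# >>> sip = SIP(crpix=[1024, 1024], a_order=2, b_order=2,
# ...           a_coeff={'A_2_0': 5e-6}, b_coeff={'B_0_2': 5e-6})
# >>> sip(1034., 1024.)   # doctest: +SKIP
# (0.0005, 0.0)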
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tabular models.
Tabular models of any dimension can be created using `tabular_model`.
For convenience `Tabular1D` and `Tabular2D` are provided.
Examples
--------
>>> table = np.array([[ 3., 0., 0.],
... [ 0., 2., 0.],
... [ 0., 0., 0.]])
>>> points = ([1, 2, 3], [1, 2, 3])
>>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False,
... fill_value=None, method='nearest')
"""
import abc
import numpy as np
from .core import Model
from astropy import units as u
from astropy.utils import minversion
try:
import scipy
from scipy.interpolate import interpn
has_scipy = True
except ImportError:
has_scipy = False
has_scipy = has_scipy and minversion(scipy, "0.14")
__all__ = ['tabular_model', 'Tabular1D', 'Tabular2D']
__doctest_requires__ = {('tabular_model'): ['scipy']}
class _Tabular(Model):
"""
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ), optional
The points defining the regular grid in n dimensions.
lookup_table : array-like, shape (m1, ..., mn, ...)
The data on a regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float or `~astropy.units.Quantity`, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d". If Quantity is given, it will be converted to the unit of
``lookup_table``, if applicable.
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
linear = False
fittable = False
standard_broadcasting = False
outputs = ('y',)
@property
@abc.abstractmethod
def lookup_table(self):
pass
_is_dynamic = True
_id = 0
def __init__(self, points=None, lookup_table=None, method='linear',
bounds_error=True, fill_value=np.nan, **kwargs):
n_models = kwargs.get('n_models', 1)
if n_models > 1:
raise NotImplementedError('Only n_models=1 is supported.')
super().__init__(**kwargs)
if lookup_table is None:
raise ValueError('Must provide a lookup table.')
if not isinstance(lookup_table, u.Quantity):
lookup_table = np.asarray(lookup_table)
if self.lookup_table.ndim != lookup_table.ndim:
raise ValueError("lookup_table should be an array with "
"{0} dimensions.".format(self.lookup_table.ndim))
if points is None:
points = tuple(np.arange(x, dtype=float)
for x in lookup_table.shape)
else:
if lookup_table.ndim == 1 and not isinstance(points, tuple):
points = (points,)
npts = len(points)
if npts != lookup_table.ndim:
raise ValueError(
"Expected grid points in "
"{0} directions, got {1}.".format(lookup_table.ndim, npts))
if (npts > 1 and isinstance(points[0], u.Quantity) and
len(set([getattr(p, 'unit', None) for p in points])) > 1):
raise ValueError('points must all have the same unit.')
if isinstance(fill_value, u.Quantity):
if not isinstance(lookup_table, u.Quantity):
raise ValueError('fill value is in {0} but expected to be '
'unitless.'.format(fill_value.unit))
fill_value = fill_value.to(lookup_table.unit).value
self.points = points
self.lookup_table = lookup_table
self.bounds_error = bounds_error
self.method = method
self.fill_value = fill_value
def __repr__(self):
fmt = "<{0}(points={1}, lookup_table={2})>".format(
self.__class__.__name__, self.points, self.lookup_table)
return fmt
def __str__(self):
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Parameters', ""),
(' points', self.points),
(' lookup_table', self.lookup_table),
(' method', self.method),
(' fill_value', self.fill_value),
(' bounds_error', self.bounds_error)
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords
if value is not None]
return '\n'.join(parts)
@property
def input_units(self):
pts = self.points[0]
if not isinstance(pts, u.Quantity):
return None
else:
return dict([(x, pts.unit) for x in self.inputs])
@property
def return_units(self):
if not isinstance(self.lookup_table, u.Quantity):
return None
else:
return {'y': self.lookup_table.unit}
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(points_low, points_high)``.
Examples
--------
>>> from astropy.modeling.models import Tabular1D, Tabular2D
>>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30])
>>> t1.bounding_box
(1, 3)
>>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]],
... lookup_table=[[10, 20, 30], [20, 30, 40]])
>>> t2.bounding_box
((2, 4), (1, 3))
"""
bbox = [(min(p), max(p)) for p in self.points][::-1]
if len(bbox) == 1:
bbox = bbox[0]
return tuple(bbox)
def evaluate(self, *inputs):
"""
Return the interpolated values at the input coordinates.
Parameters
----------
inputs : list of scalars or ndarrays
Input coordinates. The number of inputs must be equal
to the dimensions of the lookup table.
"""
if isinstance(inputs, u.Quantity):
inputs = inputs.value
shape = inputs[0].shape
inputs = [inp.flatten() for inp in inputs[: self.n_inputs]]
inputs = np.array(inputs).T
if not has_scipy: # pragma: no cover
raise ImportError("This model requires scipy >= v0.14")
result = interpn(self.points, self.lookup_table, inputs,
method=self.method, bounds_error=self.bounds_error,
fill_value=self.fill_value)
# return_units not respected when points has no units
if (isinstance(self.lookup_table, u.Quantity) and
not isinstance(self.points[0], u.Quantity)):
result = result * self.lookup_table.unit
if self.n_outputs == 1:
result = result.reshape(shape)
else:
result = [r.reshape(shape) for r in result]
return result
def tabular_model(dim, name=None):
"""
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
This model has to be further initialized and when evaluated
returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'abc.Tabular2D'>
Name: Tabular2D
    Inputs: ('x0', 'x1')
    Outputs: ('y',)
>>> points = ([1, 2, 3], [1, 2, 3])
    Setting ``fill_value`` to None allows extrapolation.
>>> m = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> m(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.])
"""
if dim < 1:
raise ValueError('Lookup table must have at least one dimension.')
table = np.zeros([2] * dim)
inputs = tuple('x{0}'.format(idx) for idx in range(table.ndim))
members = {'lookup_table': table, 'inputs': inputs}
if dim == 1:
members['_separable'] = True
else:
members['_separable'] = False
if name is None:
model_id = _Tabular._id
_Tabular._id += 1
name = 'Tabular{0}'.format(model_id)
return type(str(name), (_Tabular,), members)
Tabular1D = tabular_model(1, name='Tabular1D')
Tabular2D = tabular_model(2, name='Tabular2D')
_tab_docs = """
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
Tabular1D.__doc__ = """
Tabular model in 1D.
Returns an interpolated lookup table value.
Parameters
----------
    points : array-like of float, of ndim=1
        The points defining the regular grid in 1 dimension.
    lookup_table : array-like, of ndim=1
        The data in one dimension.
""" + _tab_docs
Tabular2D.__doc__ = """
Tabular model in 2D.
Returns an interpolated lookup table value.
Parameters
----------
    points : tuple of ndarray of float, with shapes (m1,), (m2,), optional
The points defining the regular grid in n dimensions.
lookup_table : array-like, shape (m1, m2)
The data on a regular grid in 2 dimensions.
""" + _tab_docs
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants
"""
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter, InputParameterError
from astropy.units import Quantity
__all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D',
'ExponentialCutoffPowerLaw1D', 'LogParabola1D']
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters"""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
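# Illustrative sketch (not part of the upstream module): with amplitude=10,
# x_0=1 and alpha=2 the formula above gives f(2) = 10 * 2**-2 = 2.5.
#
# >>> from astropy.modeling.models import PowerLaw1D
# >>> pl = PowerLaw1D(amplitude=10, x_0=1, alpha=2)
# >>> float(pl(2))
# 2.5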
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_break = Parameter(default=1)
alpha_1 = Parameter(default=1)
alpha_2 = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.unit is None:
return None
else:
return {'x': self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_break', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(default=1, min=0)
x_break = Parameter(default=1)
alpha_1 = Parameter(default=-2)
alpha_2 = Parameter(default=2)
delta = Parameter(default=1, min=1.e-3)
@amplitude.validator
def amplitude(self, value):
if np.any(value <= 0):
raise InputParameterError(
"amplitude parameter must be > 0")
@delta.validator
def delta(self, value):
if np.any(value < 0.001):
raise InputParameterError(
"delta parameter must be >= 0.001")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function"""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
        if i.any():
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
i = logt < -threshold
        if i.any():
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
i = np.abs(logt) <= threshold
        if i.any():
            # In this case the `t` value is "comparable" to 1, hence we
            # will evaluate the whole formula.
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False)
else:
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters"""
# Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
        if i.any():
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
        if i.any():
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
        if i.any():
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = f[i] * (alpha_1 - alpha_2) \
* (np.log(r) - t / (1. + t) / delta * np.log(xx[i]))
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.unit is None:
return None
else:
return {'x': self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_break', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
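    Examples
    --------
    A quick numerical check of the formula above (the parameter values are
    arbitrary, chosen only for illustration):
    >>> from astropy.modeling import models
    >>> ecpl = models.ExponentialCutoffPowerLaw1D(amplitude=1, x_0=1,
    ...                                           alpha=0, x_cutoff=2)
    >>> print(round(float(ecpl(2.0)), 6))   # alpha=0 leaves exp(-2 / 2)
    0.367879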
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
x_cutoff = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law derivative with respect to parameters"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('x_cutoff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
    Model formula (with :math:`A` for ``amplitude``, :math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}} \\right )}}
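    Examples
    --------
    A quick numerical check of the formula above; note that the ``log`` in
    the formula is the natural logarithm, as in the ``np.log`` call in
    ``evaluate`` (the parameter values are arbitrary, chosen only for
    illustration):
    >>> import numpy as np
    >>> from astropy.modeling import models
    >>> lp = models.LogParabola1D(amplitude=1, x_0=1, alpha=1, beta=1)
    >>> print(np.isclose(float(lp(np.e)), np.e ** -2))   # exponent = -2
    True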
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
beta = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function"""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx ** exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters"""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx ** exponent
d_beta = -amplitude * d_amplitude * log_xx ** 2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
|
02e4a61339b478c388b7f7e7e4792cab638c50aaf1243e9a3d035a2639a5d499 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import time
import uuid
import warnings
import queue
import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse
from astropy import log
from .constants import SAMP_STATUS_OK
from .constants import __profile_version__
from .errors import SAMPWarning, SAMPHubError, SAMPProxyError
from .utils import internet_on, ServerProxyPool, _HubAsClient
from .lockfile_helpers import read_lockfile, create_lock_file
from .standard_profile import ThreadingXMLRPCServer
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ['SAMPHubServer', 'WebProfileDialog']
__doctest_skip__ = ['.', 'SAMPHubServer.*']
class SAMPHubServer:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
        The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
        Client inactivity timeout. If ``client_timeout > 0`` then the Hub
        automatically unregisters clients that have been inactive for a
        period longer than ``client_timeout`` seconds. By default
        ``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
        Defines the Hub running mode. If ``mode`` is ``'single'`` then the
        Hub runs using the standard ``.samp`` lock-file, with a single
        instance per user desktop session. Otherwise, if ``mode`` is
        ``'multiple'``, then the Hub runs using a non-standard lock-file,
        placed in the ``.samp-1`` directory, of the form
        ``samp-hub-<UUID>``, where ``<UUID>`` is a unique UUID assigned
        to the hub.
label : str, optional
        A string used to label the Hub with a human-readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
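    Examples
    --------
    A minimal, non-blocking start/stop cycle (a sketch: the Web Profile is
    disabled here so no consent dialog is involved, and ``port=0``, the
    default, lets the operating system pick a free port):
    >>> from astropy.samp import SAMPHubServer
    >>> hub = SAMPHubServer(web_profile=False)
    >>> hub.start(wait=False)
    >>> hub.is_running
    True
    >>> hub.stop()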
"""
def __init__(self, secret=None, addr=None, port=0, lockfile=None,
timeout=0, client_timeout=0, mode='single', label="",
web_profile=True, web_profile_dialog=None, web_port=21012,
pool_size=20):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name,
self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, 'samp.hub.ping')
server.register_function(self._set_xmlrpc_callback, 'samp.hub.setXmlrpcCallback')
# Standard API operations
server.register_function(self._register, 'samp.hub.register')
server.register_function(self._unregister, 'samp.hub.unregister')
server.register_function(self._declare_metadata, 'samp.hub.declareMetadata')
server.register_function(self._get_metadata, 'samp.hub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.hub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.hub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.hub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.hub.getSubscribedClients')
server.register_function(self._notify, 'samp.hub.notify')
server.register_function(self._notify_all, 'samp.hub.notifyAll')
server.register_function(self._call, 'samp.hub.call')
server.register_function(self._call_all, 'samp.hub.callAll')
server.register_function(self._call_and_wait, 'samp.hub.callAndWait')
server.register_function(self._reply, 'samp.hub.reply')
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, 'samp.webhub.ping')
server.register_function(self._unregister, 'samp.webhub.unregister')
server.register_function(self._declare_metadata, 'samp.webhub.declareMetadata')
server.register_function(self._get_metadata, 'samp.webhub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.webhub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.webhub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.webhub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.webhub.getSubscribedClients')
server.register_function(self._notify, 'samp.webhub.notify')
server.register_function(self._notify_all, 'samp.webhub.notifyAll')
server.register_function(self._call, 'samp.webhub.call')
server.register_function(self._call_all, 'samp.webhub.callAll')
server.register_function(self._call_and_wait, 'samp.webhub.callAndWait')
server.register_function(self._reply, 'samp.webhub.reply')
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, 'samp.webhub.register')
server.register_function(self._web_profile_allowReverseCallbacks, 'samp.webhub.allowReverseCallbacks')
server.register_function(self._web_profile_pullCallbacks, 'samp.webhub.pullCallbacks')
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log, logRequests=False, allow_none=True)
prot = 'http'
self._port = self._server.socket.getsockname()[1]
addr = "{0}:{1}".format(self._addr or self._host_name, self._port)
self._url = urlunparse((prot, addr, '', '', '', ''))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = \
self._web_profile_requests_queue
self._web_profile_dialog.queue_result = \
self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
('localhost', self._web_port), log, logRequests=False,
allow_none=True)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except socket.error:
log.warning("Port {0} already in use. Impossible to run the "
"Hub with Web Profile support.".format(self._web_port),
SAMPWarning)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
    def _launch_thread(self, group=None, target=None, name=None, args=()):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn("Timeout expired, Hub is shutting down!",
SAMPWarning)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
                # Iterate over a copy of the keys: _unregister mutates
                # _client_activity_time while this loop is running.
                for private_key in list(self._client_activity_time.keys()):
if (now - self._client_activity_time[private_key] > self._client_timeout
and private_key != self._hub_private_key):
warnings.warn(
"Client {} timeout expired!".format(private_key),
SAMPWarning)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == 'samp.client.receiveCall':
return self._receive_call(*args)
elif method == 'samp.client.receiveNotification':
return self._receive_notification(*args)
elif method == 'samp.client.receiveResponse':
return self._receive_response(*args)
elif method == 'samp.app.ping':
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "http://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon"}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(self._hub_private_key,
{"samp.app.ping": {},
"x-samp.query.by-meta": {}})
def start(self, wait=False):
"""
        Start the current SAMP Hub instance and create the lock file. Hub
        start-up can be blocking or non-blocking depending on the ``wait``
        parameter.
        Parameters
        ----------
        wait : bool
            If `True` then the Hub process is joined with the caller,
            blocking the code flow. `True` is typically used to run a
            stand-alone Hub in an executable script. If `False` (the
            default), the Hub process runs in a separate thread, which is
            usually preferable in a Python shell.
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(lockfilename=self._customlockfilename,
mode=self._mode, hub_id=self.id,
hub_params=self.params)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
        The hub parameters (which are written to the lock file)
"""
params = {}
# Keys required by standard profile
params['samp.secret'] = self._hub_secret
params['samp.hub.xmlrpc.url'] = self._url
params['samp.profile.version'] = __profile_version__
# Custom keys
params['hub.id'] = self.id
params['hub.label'] = self._label or "Hub {0}".format(self.id)
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub,
name="Hub timeout test")
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client,
name="Client timeout test")
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict['samp.secret'] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread:
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread:
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(request, self._web_profile_requests_result)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown",
"samp.params": {}})
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id}})
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id}})
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.metadata",
"samp.params": {"id": public_id,
"metadata": self._metadata[private_key]}
})
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {"id": public_id,
"subscriptions": self._id2mtypes[private_key]}
})
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(private_key, hub_public_id, message)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug("notify disconnection to {}".format(public_id))
self._launch_thread(target=_xmlrpc_call_disconnect,
args=(endpoint, private_key,
self._hub_public_id,
{"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"}}))
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = \
(xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler))
return ""
# Dictionary stored with the public id
log.debug("set_xmlrpc_callback: {} {}".format(private_key,
xmlrpc_addr))
server_proxy_pool = ServerProxyPool(self._pool_size,
xmlrpc.ServerProxy,
xmlrpc_addr, allow_none=1)
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr,
server_proxy_pool)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug("register: private-key = {} and self-id = {}"
.format(private_key, public_id))
return {"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = 'cli#hub'
if self._client_id_counter > 0:
public_id = "cli#{}".format(self._client_id_counter)
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug("unregister {} ({})".format(public_key, private_key))
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_metadata: private-key = {} metadata = {}"
.format(private_key, str(metadata)))
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug("get_metadata: private-key = {} client-id = {}"
.format(private_key, client_id))
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug("--> metadata = {}"
.format(self._metadata[client_private_key]))
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_subscriptions: private-key = {} mtypes = {}"
.format(private_key, str(mtypes)))
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and \
mtype2 != mtype:
if mtype2 in mtypes:
                                del mtypes[mtype2]
log.debug("declare_subscriptions: subscriptions accepted from "
"{} => {}".format(private_key, str(mtypes)))
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug("get_subscriptions: client-id = {} mtypes = {}"
.format(client_id,
str(self._id2mtypes[client_private_key])))
return self._id2mtypes[client_private_key]
else:
log.debug("get_subscriptions: client-id = {} mtypes = "
"missing".format(client_id))
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug("get_registered_clients: private_key = {} clients = {}"
.format(private_key, reg_clients))
return reg_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug("get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients))
return sub_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[:i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
self._launch_thread(target=self._notify_, args=(private_key,
recipient_id,
message))
return {}
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug("notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id,
recipient_public_id))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} notification from client {} to client {} "
"failed [{}]".format(message["samp.mtype"],
sender_public_id,
recipient_public_id, exc),
SAMPWarning)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_all_(self, sender_private_key, message):
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(target=self._notify,
args=(sender_private_key,
_recipient_id, message)
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(target=self._call_, args=(private_key, public_id,
recipient_id, msg_id,
message))
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_(self, sender_private_key, sender_public_id,
recipient_public_id, msg_id, message):
if sender_private_key not in self._private_keys:
return
try:
log.debug("call {} from {} to {} ({})".format(
msg_id.split(";;")[0], sender_public_id,
recipient_public_id, message["samp.mtype"]))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
samp_methodName = "receiveCall"
self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params)
except Exception as exc:
warnings.warn("{} call {} from client {} to client {} failed "
"[{},{}]".format(message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id, type(exc), exc),
SAMPWarning)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing in "
"message tagged as {}".format(msg_tag))
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_all_(self, sender_private_key, sender_public_id, msg_tag,
message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id,
msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(target=self._call_,
args=(sender_private_key,
sender_public_id,
receiver_public_id, _msg_id,
message))
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call",
message)
self._sync_msg_ids_heap[msg_id] = None
while self._is_running:
if 0 < timeout <= time.time() - now:
                    del self._sync_msg_ids_heap[msg_id]
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
                    del self._sync_msg_ids_heap[msg_id]
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(target=self._reply_, args=(private_key, msg_id,
response))
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3)
try:
log.debug("reply {} from {} to {}".format(
counter, responder_public_id, recipient_public_id))
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} reply from client {} to client {} failed [{}]"
.format(recipient_msg_tag, responder_public_id,
recipient_public_id, exc),
SAMPWarning)
def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params):
"""
This method is used to retry a SAMP call several times.
Parameters
----------
        recipient_private_key
            The private key of the recipient of the call
        recipient_public_id
            The public ID of the recipient of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (self._web_profile and
recipient_private_key in self._web_profile_callbacks):
# Web Profile
callback = {"samp.methodName": samp_method_name,
"samp.params": arg_params}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params)
except xmlrpc.Fault as exc:
log.debug("{} XML-RPC endpoint error (attempt {}): {}"
.format(recipient_public_id, attempt + 1,
exc.faultString))
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
        error_message = "{} failed after {} attempts".format(samp_method_name, conf.n_retries)
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
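        # The id bundles the counter, the hub id, the sender id and the
        # sender's tag, e.g. "msg#12;;<hub-public-id>;;cli#2;;<tag>"
        # (illustrative values); _reply_ splits it back apart on ";;".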
return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id, sender_msg_id)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}})
elif ("samp.mtype" in message and
(message["samp.mtype"] == "x-samp.query.by-meta" or
message["samp.mtype"] == "samp.query.by-meta")):
ids_list = self._query_by_metadata(message["samp.params"]["key"],
message["samp.params"]["value"])
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK,
"samp.result": {"ids": ids_list}})
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(self, identity_info,
client_address=("unknown", 0),
origin="unknown"):
self._update_last_activity_time()
        if client_address[0] not in ("localhost", "127.0.0.1"):
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub.")
if not origin:
origin = "unknown"
        # An old version of the protocol provided just a string with the
        # application name instead of a dictionary, so the metadata check
        # below only applies when a dictionary was sent.
        if isinstance(identity_info, dict):
            if "samp.name" not in identity_info:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub (application name not "
"provided).")
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address,
origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = ("http://localhost:{}/translator/{}?ref="
.format(self._web_port, register_map["samp.private-key"]))
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by "
"the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
class WebProfileDialog:
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
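    A minimal non-GUI sketch of a concrete subclass (``AutoConsentDialog``
    is a hypothetical name, and unconditionally accepting every request is
    for illustration only; a real implementation would show a dialog and
    call ``consent`` or ``reject`` from its button handlers)::
        class AutoConsentDialog(WebProfileDialog):
            def show_dialog(self, samp_name, details, client, origin):
                # A real dialog would present samp_name and origin to
                # the user here and await a decision.
                self.consent()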
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
|
cb25e1d28da439ae5bbbe7dd62acb844be99948a900f3598d4f8f9825039038a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: this file should be refactored to use a more thread-safe and
# race-condition-safe lockfile mechanism.
import datetime
import os
import socket
import stat
import warnings
from contextlib import suppress
from urllib.parse import urlparse
import xmlrpc.client as xmlrpc
from astropy.config.paths import _find_home
from astropy import log
from astropy.utils.data import get_readable_fileobj
from .errors import SAMPHubError, SAMPWarning
def read_lockfile(lockfilename):
"""
Read in the lockfile given by ``lockfilename`` into a dictionary.
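    A lockfile is a plain-text file of ``key=value`` tokens with ``#``
    comment lines, as written by ``write_lockfile`` below; schematically
    (illustrative values)::
        # SAMP lockfile written on ...
        samp.secret=...
        samp.hub.xmlrpc.url=http://127.0.0.1:12345
        samp.profile.version=...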
"""
# lockfilename may be a local file or a remote URL, but
# get_readable_fileobj takes care of this.
lockfiledict = {}
with get_readable_fileobj(lockfilename) as f:
for line in f:
if not line.startswith("#"):
kw, val = line.split("=")
lockfiledict[kw.strip()] = val.strip()
return lockfiledict
def write_lockfile(lockfilename, lockfiledict):
    # Create the file and restrict its permissions to the owner *before*
    # writing the hub secret into it.
    open(lockfilename, "w").close()
    os.chmod(lockfilename, stat.S_IREAD + stat.S_IWRITE)
    with open(lockfilename, "w") as lockfile:
        now_iso = datetime.datetime.now().isoformat()
        lockfile.write("# SAMP lockfile written on {}\n".format(now_iso))
        lockfile.write("# Standard Profile required keys\n")
        for key, value in lockfiledict.items():
            lockfile.write("{0}={1}\n".format(key, value))
def create_lock_file(lockfilename=None, mode=None, hub_id=None,
hub_params=None):
# Remove lock-files of dead hubs
remove_garbage_lock_files()
lockfiledir = ""
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
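        # SAMP_HUB is then expected to look like, e.g. (illustrative
        # path), std-lockurl:file:///home/user/.samp; only file:// URLs
        # are accepted below.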
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
lockfile_parsed = urlparse(lockfilename)
if lockfile_parsed[0] != 'file':
warnings.warn("Unable to start a Hub with lockfile {}. "
"Start-up process aborted.".format(lockfilename),
SAMPWarning)
return False
else:
lockfilename = lockfile_parsed[2]
else:
# If it is a fresh Hub instance
if lockfilename is None:
log.debug("Running mode: " + mode)
if mode == 'single':
lockfilename = os.path.join(_find_home(), ".samp")
else:
lockfiledir = os.path.join(_find_home(), ".samp-1")
# If missing create .samp-1 directory
try:
os.mkdir(lockfiledir)
except OSError:
pass # directory already exists
finally:
os.chmod(lockfiledir,
stat.S_IREAD + stat.S_IWRITE + stat.S_IEXEC)
lockfilename = os.path.join(lockfiledir,
"samp-hub-{}".format(hub_id))
else:
log.debug("Running mode: multiple")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
warnings.warn("Another SAMP Hub is already running. Start-up process "
"aborted.", SAMPWarning)
return False
log.debug("Lock-file: " + lockfilename)
write_lockfile(lockfilename, hub_params)
return lockfilename
def get_main_running_hub():
"""
Get either the hub given by the environment variable SAMP_HUB, or the one
given by the lockfile .samp in the user home directory.
"""
hubs = get_running_hubs()
if not hubs:
raise SAMPHubError("Unable to find a running SAMP Hub.")
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
else:
raise SAMPHubError("SAMP Hub profile not supported.")
else:
lockfilename = os.path.join(_find_home(), ".samp")
return hubs[lockfilename]
def get_running_hubs():
"""
Return a dictionary containing the lock-file contents of all the currently
running hubs (single and/or multiple mode).
The dictionary format is:
``{<lock-file>: {<token-name>: <token-string>, ...}, ...}``
    where ``<lock-file>`` is the lock-file name, and ``<token-name>`` and
    ``<token-string>`` are the lock-file tokens (name and content).
Returns
-------
running_hubs : dict
Lock-file contents of all the currently running hubs.
"""
hubs = {}
lockfilename = ""
# HUB SINGLE INSTANCE MODE
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
else:
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
# HUB MULTIPLE INSTANCE MODE
lockfiledir = ""
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith('samp-hub'):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
return hubs
def check_running_hub(lockfilename):
"""
Test whether a hub identified by ``lockfilename`` is running or not.
Parameters
----------
lockfilename : str
Lock-file name (path + file name) of the Hub to be tested.
Returns
-------
is_running : bool
Whether the hub is running
hub_params : dict
If the hub is running this contains the parameters from the lockfile
"""
is_running = False
lockfiledict = {}
# Check whether a lockfile already exists
try:
lockfiledict = read_lockfile(lockfilename)
except OSError:
return is_running, lockfiledict
if "samp.hub.xmlrpc.url" in lockfiledict:
try:
proxy = xmlrpc.ServerProxy(lockfiledict["samp.hub.xmlrpc.url"]
.replace("\\", ""), allow_none=1)
proxy.samp.hub.ping()
is_running = True
except xmlrpc.ProtocolError:
            # There is a protocol error (e.g. authentication is required),
            # but the server is alive.
is_running = True
except socket.error:
pass
return is_running, lockfiledict
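# A minimal usage sketch (hedged; the path below is the default single-mode
# lock-file location used elsewhere in this module):
#
#     lockfile = os.path.join(_find_home(), ".samp")
#     running, params = check_running_hub(lockfile)
#     if running:
#         print("Hub alive at", params.get("samp.hub.xmlrpc.url"))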
def remove_garbage_lock_files():
lockfilename = ""
# HUB SINGLE INSTANCE MODE
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
# HUB MULTIPLE INSTANCE MODE
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith('samp-hub'):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
|
48256f74bd49fde16fd9a98a466d3d9a47d13ce75be8de52a44849049a9b67c6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import time
import sys
import argparse
from astropy import log, __version__
from .hub import SAMPHubServer
__all__ = ['hub_script']
def hub_script(timeout=0):
"""
This main function is executed by the ``samp_hub`` command line tool.
"""
parser = argparse.ArgumentParser(prog="samp_hub " + __version__)
parser.add_argument("-k", "--secret", dest="secret", metavar="CODE",
help="custom secret code.")
parser.add_argument("-d", "--addr", dest="addr", metavar="ADDR",
help="listening address (or IP).")
parser.add_argument("-p", "--port", dest="port", metavar="PORT", type=int,
help="listening port number.")
parser.add_argument("-f", "--lockfile", dest="lockfile", metavar="FILE",
help="custom lockfile.")
parser.add_argument("-w", "--no-web-profile", dest="web_profile", action="store_false",
help="run the Hub disabling the Web Profile.", default=True)
parser.add_argument("-P", "--pool-size", dest="pool_size", metavar="SIZE", type=int,
help="the socket connections pool size.", default=20)
    timeout_group = parser.add_argument_group("Timeout group",
                                              "Special options to set up hub and client timeouts. "
                                              "These options control the Hub and client inactivity timeouts, "
                                              "that is, the inactivity time interval after which the Hub "
                                              "shuts down or unregisters the client. A notification with the "
                                              "samp.hub.disconnect MType is sent to any client that is "
                                              "forcibly unregistered because its timeout expired.")
timeout_group.add_argument("-t", "--timeout", dest="timeout", metavar="SECONDS",
help="set the Hub inactivity timeout in SECONDS. By default it "
"is set to 0, that is the Hub never expires.", type=int, default=0)
timeout_group.add_argument("-c", "--client-timeout", dest="client_timeout", metavar="SECONDS",
help="set the client inactivity timeout in SECONDS. By default it "
"is set to 0, that is the client never expires.", type=int, default=0)
    log_group = parser.add_argument_group("Logging options",
                                          "Additional options to customize the logging output. By default "
                                          "the SAMP Hub uses standard output and standard error to print "
                                          "INFO-level logging messages. The options below make it possible "
                                          "to change the logging level and to specify an output file to "
                                          "which the logging messages are redirected.")
log_group.add_argument("-L", "--log-level", dest="loglevel", metavar="LEVEL",
help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
type=str, choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"], default='INFO')
log_group.add_argument("-O", "--log-output", dest="logout", metavar="FILE",
help="set the output file for the log messages.", default="")
    adv_group = parser.add_argument_group("Advanced group",
                                          "Advanced options intended to facilitate administrative tasks and "
                                          "enable non-standard Hub behaviors. In particular, the --label "
                                          "option assigns a value to the hub.label token, giving the Hub "
                                          "instance a name. "
                                          "The --multi option starts the Hub in multi-instance mode. "
                                          "Multi-instance mode is a non-standard Hub behavior that enables "
                                          "multiple Hubs to run at the same time. Multi-instance hubs place "
                                          "their non-standard lock-files within the <home directory>/.samp-1 "
                                          "directory, naming them with the format "
                                          "samp-hub-<PID>-<ID>, where PID is the Hub process ID and ID is "
                                          "an internal integer ID.")
adv_group.add_argument("-l", "--label", dest="label", metavar="LABEL",
help="assign a LABEL to the Hub.", default="")
adv_group.add_argument("-m", "--multi", dest="mode",
help="run the Hub in multi-instance mode generating a custom "
"lockfile with a random name.",
action="store_const", const='multiple', default='single')
options = parser.parse_args()
try:
if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
log.setLevel(options.loglevel)
if options.logout != "":
context = log.log_to_file(options.logout)
else:
class dummy_context:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
context = dummy_context()
with context:
args = copy.deepcopy(options.__dict__)
del(args["loglevel"])
del(args["logout"])
hub = SAMPHubServer(**args)
hub.start(False)
if not timeout:
while hub.is_running:
time.sleep(0.01)
else:
time.sleep(timeout)
hub.stop()
except KeyboardInterrupt:
try:
hub.stop()
except NameError:
pass
except OSError as e:
print("[SAMP] Error: I/O error({0}): {1}".format(e.errno, e.strerror))
sys.exit(1)
except SystemExit:
pass
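# Illustrative command lines (hedged; option names are those defined by the
# parser above):
#
#     samp_hub                       # standard single-instance Hub
#     samp_hub -m -t 600             # multi-instance Hub, expires after 600 s
#     samp_hub -w                    # Hub with the Web Profile disabled
#     samp_hub -L DEBUG -O hub.log   # DEBUG-level logging redirected to a file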
|
92ceaa5c1e62cdabe82ec0c125901076465055f1f4a7a6ed97b228797a101780 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from urllib.parse import parse_qs
from urllib.request import urlopen
from astropy.utils.data import get_pkg_data_contents
from .standard_profile import (SAMPSimpleXMLRPCRequestHandler,
ThreadingXMLRPCServer)
__all__ = []
CROSS_DOMAIN = get_pkg_data_contents('data/crossdomain.xml')
CLIENT_ACCESS_POLICY = get_pkg_data_contents('data/clientaccesspolicy.xml')
class WebProfileRequestHandler(SAMPSimpleXMLRPCRequestHandler):
"""
Handler of XMLRPC requests performed through the Web Profile.
"""
def _send_CORS_header(self):
if self.headers.get('Origin') is not None:
method = self.headers.get('Access-Control-Request-Method')
if method and self.command == "OPTIONS":
# Preflight method
self.send_header('Content-Length', '0')
self.send_header('Access-Control-Allow-Origin',
self.headers.get('Origin'))
self.send_header('Access-Control-Allow-Methods', method)
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
self.send_header('Access-Control-Allow-Credentials', 'true')
else:
# Simple method
self.send_header('Access-Control-Allow-Origin',
self.headers.get('Origin'))
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
self.send_header('Access-Control-Allow-Credentials', 'true')
def end_headers(self):
self._send_CORS_header()
SAMPSimpleXMLRPCRequestHandler.end_headers(self)
def _serve_cross_domain_xml(self):
cross_domain = False
if self.path == "/crossdomain.xml":
# Adobe standard
response = CROSS_DOMAIN
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/x-cross-domain-policy')
self.send_header("Content-Length", "{0}".format(len(response)))
self.end_headers()
self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
cross_domain = True
elif self.path == "/clientaccesspolicy.xml":
# Microsoft standard
response = CLIENT_ACCESS_POLICY
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/xml')
self.send_header("Content-Length", "{0}".format(len(response)))
self.end_headers()
self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
cross_domain = True
return cross_domain
def do_POST(self):
if self._serve_cross_domain_xml():
return
return SAMPSimpleXMLRPCRequestHandler.do_POST(self)
def do_HEAD(self):
if not self.is_http_path_valid():
self.report_404()
return
if self._serve_cross_domain_xml():
return
def do_OPTIONS(self):
self.send_response(200, 'OK')
self.end_headers()
def do_GET(self):
if not self.is_http_path_valid():
self.report_404()
return
split_path = self.path.split('?')
if split_path[0] in ['/translator/{}'.format(clid) for clid in self.server.clients]:
# Request of a file proxying
urlpath = parse_qs(split_path[1])
try:
proxyfile = urlopen(urlpath["ref"][0])
self.send_response(200, 'OK')
self.end_headers()
self.wfile.write(proxyfile.read())
proxyfile.close()
except OSError:
self.report_404()
return
if self._serve_cross_domain_xml():
return
def is_http_path_valid(self):
valid_paths = (["/clientaccesspolicy.xml", "/crossdomain.xml"] +
['/translator/{}'.format(clid) for clid in self.server.clients])
return self.path.split('?')[0] in valid_paths
class WebProfileXMLRPCServer(ThreadingXMLRPCServer):
"""
XMLRPC server supporting the SAMP Web Profile.
"""
def __init__(self, addr, log=None, requestHandler=WebProfileRequestHandler,
logRequests=True, allow_none=True, encoding=None):
self.clients = []
ThreadingXMLRPCServer.__init__(self, addr, log, requestHandler,
logRequests, allow_none, encoding)
def add_client(self, client_id):
self.clients.append(client_id)
def remove_client(self, client_id):
try:
self.clients.remove(client_id)
except ValueError:
# No warning here because this method gets called for all clients,
# not just web clients, and we expect it to fail for non-web
# clients.
pass
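# A minimal usage sketch (hedged; the address and client ID below are
# illustrative):
#
#     server = WebProfileXMLRPCServer(('127.0.0.1', 21012), log=None)
#     server.add_client('cli#1')   # enables /translator/cli#1 URL proxying
#     # server.serve_forever()     # inherited from the underlying XML-RPC server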
def web_profile_text_dialog(request, queue):
samp_name = "unknown"
if isinstance(request[0], str):
# To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
    text = \
    """A Web application which identifies itself as
    Name: {}
    Origin: {}
    is requesting to be registered with the SAMP Hub.
    Be aware that if you permit its registration, the
    application will acquire all current user privileges, such as
    file read/write access.
    Do you give your consent? [yes|no]""".format(samp_name, request[2])
print(text)
answer = input(">>> ")
queue.put(answer.lower() in ["yes", "y"])
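# A minimal usage sketch (hedged; a real hub calls this from its Web Profile
# consent machinery, passing the registration request and a queue):
#
#     from queue import Queue
#     q = Queue()
#     request = ({"samp.name": "MyApp"}, None, "https://example.org")
#     web_profile_text_dialog(request, q)
#     consent = q.get()   # True if the user typed "yes" or "y"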
|
109af8dfe5650f8c47e9ca5175875a6b398580a1aa47ad3ee1bc35e953dd02cf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocol (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.samp` to use "
"the internet, if available.",
aliases=['astropy.samp.utils.use_internet'])
n_retries = _config.ConfigItem(10,
"How many times to retry communications when they fail")
conf = Conf()
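# A minimal usage sketch (hedged) of the configuration namespace defined above:
#
#     from astropy.samp import conf
#     conf.use_internet = False   # clients then fall back to 127.0.0.1
#     conf.n_retries = 3          # give up sooner on failing communications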
|
15bcedef467ba6ca937da6e95a4cbe5404d500b20c9f766498cd0e169d251845 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import warnings
from urllib.parse import urlunparse
from .constants import SAMP_STATUS_OK, SAMP_STATUS_WARNING
from .hub import SAMPHubServer
from .errors import SAMPClientError, SAMPWarning
from .utils import internet_on, get_num_args
from .standard_profile import ThreadingXMLRPCServer
__all__ = ['SAMPClient']
class SAMPClient:
"""
    Utility class which provides facilities to create and manage a SAMP
    compliant XML-RPC server that acts as a SAMP callable client application.
Parameters
----------
hub : :class:`~astropy.samp.SAMPHubProxy`
An instance of :class:`~astropy.samp.SAMPHubProxy` to be
used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
# TODO: define what is meant by callable
def __init__(self, hub, name=None, description=None, metadata=None,
addr=None, port=0, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._addr = addr
self._port = port
self._xmlrpcAddr = None
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}]}
self._response_bindings = {}
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
self.hub = hub
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
self.client = ThreadingXMLRPCServer((self._addr or self._host_name,
self._port), logRequests=False, allow_none=True)
self.client.register_introspection_functions()
self.client.register_function(self.receive_notification, 'samp.client.receiveNotification')
self.client.register_function(self.receive_call, 'samp.client.receiveCall')
self.client.register_function(self.receive_response, 'samp.client.receiveResponse')
# If the port was set to zero, then the operating system has
# selected a free port. We now check what this port number is.
if self._port == 0:
self._port = self.client.socket.getsockname()[1]
protocol = 'http'
self._xmlrpcAddr = urlunparse((protocol,
'{0}:{1}'.format(self._addr or self._host_name,
self._port),
'', '', '', ''))
def start(self):
"""
Start the client in a separate thread (non-blocking).
This only has an effect if ``callable`` was set to `True` when
initializing the client.
"""
if self._callable:
self._is_running = True
self._run_client()
def stop(self, timeout=10.):
"""
Stop the client.
Parameters
----------
timeout : float
Timeout after which to give up if the client cannot be cleanly
shut down.
"""
# Setting _is_running to False causes the loop in _serve_forever to
# exit. The thread should then stop running. We wait for the thread to
# terminate until the timeout, then we continue anyway.
self._is_running = False
if self._callable and self._thread.is_alive():
self._thread.join(timeout)
if self._thread.is_alive():
raise SAMPClientError("Client was not shut down successfully "
"(timeout={0}s)".format(timeout))
@property
def is_running(self):
"""
Whether the client is currently running.
"""
return self._is_running
@property
def is_registered(self):
"""
Whether the client is currently registered.
"""
return self._is_registered
def _run_client(self):
if self._callable:
self._thread.start()
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self.client.socket], [], [], 0.1)[0]
except OSError as exc:
warnings.warn("Call to select in SAMPClient failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self.client.handle_request()
self.client.server_close()
def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params,
message):
reply = {"samp.status": SAMP_STATUS_OK, "samp.result": {}}
self.hub.reply(private_key, msg_id, reply)
def _client_env_get(self, private_key, sender_id, msg_id, msg_mtype,
msg_params, message):
if msg_params["name"] in os.environ:
reply = {"samp.status": SAMP_STATUS_OK,
"samp.result": {"value": os.environ[msg_params["name"]]}}
else:
reply = {"samp.status": SAMP_STATUS_WARNING,
"samp.result": {"value": ""},
"samp.error": {"samp.errortxt":
"Environment variable not defined."}}
self.hub.reply(private_key, msg_id, reply)
def _handle_notification(self, private_key, sender_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._notification_bindings:
bound_func = self._notification_bindings[mtype][0]
if get_num_args(bound_func) == 5:
bound_func(private_key, sender_id, msg_mtype,
msg_params, message)
else:
bound_func(private_key, sender_id, None, msg_mtype,
msg_params, message)
return ""
def receive_notification(self, private_key, sender_id, message):
"""
Standard callable client ``receive_notification`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
method is used to bind distinct operations to MTypes. In case of a
customized callable client implementation that inherits from the
        :class:`~astropy.samp.SAMPClient` class, this method should be
        overridden.
        .. note:: When overridden, this method must always return
           a string result (even an empty one).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_notification(private_key, sender_id, message)
def _handle_call(self, private_key, sender_id, msg_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._call_bindings:
self._call_bindings[mtype][0](private_key, sender_id,
msg_id, msg_mtype,
msg_params, message)
return ""
def receive_call(self, private_key, sender_id, msg_id, message):
"""
Standard callable client ``receive_call`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_call` method is
used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
        :class:`~astropy.samp.SAMPClient` class, this method should be
        overridden.
        .. note:: When overridden, this method must always return
           a string result (even an empty one).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
msg_id : str
Message ID received.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_call(private_key, sender_id, msg_id, message)
def _handle_response(self, private_key, responder_id, msg_tag, response):
if (private_key == self.get_private_key() and
msg_tag in self._response_bindings):
self._response_bindings[msg_tag](private_key, responder_id,
msg_tag, response)
return ""
def receive_response(self, private_key, responder_id, msg_tag, response):
"""
Standard callable client ``receive_response`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_response` method
is used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
        :class:`~astropy.samp.SAMPClient` class, this method should be
        overridden.
        .. note:: When overridden, this method must always return
           a string result (even an empty one).
Parameters
----------
private_key : str
Client private key.
responder_id : str
Responder public ID.
msg_tag : str
Response message tag.
response : dict
Received response.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_response(private_key, responder_id, msg_tag,
response)
def bind_receive_message(self, mtype, function, declare=True,
metadata=None):
"""
        Bind a specific MType to a function or class method, intended for
        both calls and notifications.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id (calls only,
otherwise is `None`), ``mtype`` is the message MType, ``params`` is the
message parameter set (content of ``"samp.params"``) and ``extra`` is a
dictionary containing any extra message map entry. The client is
automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
self.bind_receive_call(mtype, function, declare=declare,
metadata=metadata)
self.bind_receive_notification(mtype, function, declare=declare,
metadata=metadata)
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType notification to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, mtype,
params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``mtype`` is the message MType, ``params`` is
the notified message parameter set (content of ``"samp.params"``) and
``extra`` is a dictionary containing any extra message map entry. The
client is automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._notification_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType call to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id, ``mtype`` is
the message MType, ``params`` is the message parameter set (content of
``"samp.params"``) and ``extra`` is a dictionary containing any extra
message map entry. The client is automatically declared subscribed to
the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._call_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_response(self, msg_tag, function):
"""
Bind a specific msg-tag response to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, responder_id,
msg_tag, response)
where ``private_key`` is the client private-key, ``responder_id`` is
the message responder ID, ``msg_tag`` is the message-tag provided at
call time and ``response`` is the response received.
Parameters
----------
msg_tag : str
Message-tag to be caught.
function : callable
Application function to be used when ``msg_tag`` is received.
"""
if self._callable:
self._response_bindings[msg_tag] = function
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_notification(self, mtype, declare=True):
"""
Remove from the notifications binding table the specified MType and
unsubscribe the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._notification_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_call(self, mtype, declare=True):
"""
Remove from the calls binding table the specified MType and unsubscribe
the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._call_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_response(self, msg_tag):
"""
Remove from the responses binding table the specified message-tag.
Parameters
----------
msg_tag : str
Message-tag to be removed.
"""
if self._callable:
del self._response_bindings[msg_tag]
else:
raise SAMPClientError("Client not callable.")
def declare_subscriptions(self, subscriptions=None):
"""
Declares the MTypes the client wishes to subscribe to, implicitly
defined with the MType binding methods
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
and :meth:`~astropy.samp.client.SAMPClient.bind_receive_call`.
An optional ``subscriptions`` map can be added to the final map passed
to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
Parameters
----------
subscriptions : dict, optional
Dictionary containing the list of MTypes to subscribe to, with the
same format of the ``subscriptions`` map passed to the
:meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
"""
if self._callable:
self._declare_subscriptions(subscriptions)
else:
raise SAMPClientError("Client not callable.")
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register(self.hub.lockfile["samp.secret"])
if result["samp.self-id"] == "":
raise SAMPClientError("Registration failed - "
"samp.self-id was not set by the hub.")
if result["samp.private-key"] == "":
raise SAMPClientError("Registration failed - "
"samp.private-key was not set by the hub.")
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._set_xmlrpc_callback()
self._declare_subscriptions()
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
else:
raise SAMPClientError("Unable to register to the SAMP Hub. "
"Hub proxy not connected.")
def unregister(self):
"""
Unregister the client from the SAMP Hub.
"""
if self.hub.is_connected:
self._is_registered = False
self.hub.unregister(self._private_key)
self._hub_id = None
self._public_id = None
self._private_key = None
else:
raise SAMPClientError("Unable to unregister from the SAMP Hub. "
"Hub proxy not connected.")
def _set_xmlrpc_callback(self):
if self.hub.is_connected and self._private_key is not None:
self.hub.set_xmlrpc_callback(self._private_key,
self._xmlrpcAddr)
def _declare_subscriptions(self, subscriptions=None):
if self.hub.is_connected and self._private_key is not None:
mtypes_dict = {}
# Collect notification mtypes and metadata
for mtype in self._notification_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._notification_bindings[mtype][1])
            # Collect call mtypes and metadata
for mtype in self._call_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._call_bindings[mtype][1])
# Add optional subscription map
if subscriptions:
mtypes_dict.update(copy.deepcopy(subscriptions))
self.hub.declare_subscriptions(self._private_key, mtypes_dict)
else:
raise SAMPClientError("Unable to declare subscriptions. Hub "
"unreachable or not connected or client "
"not registered.")
def declare_metadata(self, metadata=None):
"""
Declare the client application metadata supported.
Parameters
----------
metadata : dict, optional
Dictionary containing the client application metadata as defined in
the SAMP definition document. If omitted, then no metadata are
declared.
"""
if self.hub.is_connected and self._private_key is not None:
if metadata is not None:
self._metadata.update(metadata)
self.hub.declare_metadata(self._private_key, self._metadata)
else:
raise SAMPClientError("Unable to declare metadata. Hub "
"unreachable or not connected or client "
"not registered.")
def get_private_key(self):
"""
Return the client private key used for the Standard Profile
communications obtained at registration time (``samp.private-key``).
Returns
-------
key : str
Client private key.
"""
return self._private_key
def get_public_id(self):
"""
Return public client ID obtained at registration time
(``samp.self-id``).
Returns
-------
id : str
Client public ID.
"""
return self._public_id
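# A minimal end-to-end sketch (hedged; assumes a hub is already running and
# uses SAMPHubProxy from this subpackage):
#
#     from astropy.samp import SAMPHubProxy
#
#     proxy = SAMPHubProxy()
#     proxy.connect()
#     client = SAMPClient(proxy, name="MyClient")
#     client.start()
#     client.register()
#     client.bind_receive_notification(
#         "samp.app.event",
#         lambda private_key, sender_id, mtype, params, extra: None)
#     # ... interact with the hub ...
#     client.unregister()
#     client.stop()
#     proxy.disconnect()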
|
858bf67b329fee59492c4957fb8ce9b72fa0b9729ab424862cd8f8b12bac8d3a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import types
import warnings
from inspect import signature
from functools import wraps
from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
AstropyPendingDeprecationWarning)
__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
'deprecated_renamed_argument', 'format_doc',
'lazyproperty', 'sharedmethod', 'wraps']
_NotFound = object()
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, warning_type=AstropyDeprecationWarning):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
    ----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of a
``warning_type``.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
warning_type : warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
new_doc = (('\n.. deprecated:: {since}'
'\n {message}\n\n'.format(
**{'since': since, 'message': message.strip()})) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message, warning_type=warning_type):
"""
Returns a wrapped function that displays ``warning_type``
when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = warning_type
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__['__add__']): # nopep8
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(
deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message, warning_type=warning_type):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
        - Subclassing the class and returning the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(get_function(cls.__init__),
message, warning_type)
else:
cls.__new__ = deprecate_function(get_function(cls.__new__),
message, warning_type)
return cls
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, warning_type=warning_type):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = 'class'
elif inspect.isfunction(obj):
obj_type_name = 'function'
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = 'method'
else:
obj_type_name = 'object'
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ''
if not message or type(message) is type(deprecate):
if pending:
message = ('The {func} {obj_type} will be deprecated in a '
'future version.')
else:
message = ('The {func} {obj_type} is deprecated and may '
'be removed in a future version.')
if alternative:
altmessage = '\n Use {} instead.'.format(alternative)
message = ((message.format(**{
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type_name})) +
altmessage)
if isinstance(obj, type):
return deprecate_class(obj, message, warning_type)
else:
return deprecate_function(obj, message, warning_type)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
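# A minimal usage sketch (hedged; the function names below are illustrative):
#
#     @deprecated('1.0', alternative='new_function')
#     def old_function():
#         ...
#
# Calling old_function() then emits an AstropyDeprecationWarning that points
# the user at new_function.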
def deprecated_attribute(name, since, message=None, alternative=None,
pending=False, warning_type=AstropyDeprecationWarning):
"""
Used to mark a public attribute as deprecated. This creates a
property that will warn when the given attribute name is accessed.
To prevent the warning (i.e. for internal code), use the private
name for the attribute by prepending an underscore
(i.e. ``self._name``).
Parameters
----------
name : str
The name of the deprecated attribute.
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``name`` may be used for the name of the attribute,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function.
alternative : str, optional
An alternative attribute that the user may use in place of the
deprecated attribute. The deprecation warning will tell the
user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of
``warning_type``.
warning_type : warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
Examples
--------
::
class MyClass:
# Mark the old_name as deprecated
old_name = misc.deprecated_attribute('old_name', '0.1')
def method(self):
self._old_name = 42
"""
private_name = '_' + name
@deprecated(since, name=name, obj_type='attribute', warning_type=warning_type)
def get(self):
return getattr(self, private_name)
@deprecated(since, name=name, obj_type='attribute', warning_type=warning_type)
def set(self, val):
setattr(self, private_name, val)
@deprecated(since, name=name, obj_type='attribute', warning_type=warning_type)
def delete(self):
delattr(self, private_name)
return property(get, set, delete)
def deprecated_renamed_argument(old_name, new_name, since,
arg_in_kwargs=False, relax=False,
pending=False,
warning_type=AstropyDeprecationWarning,
alternative=''):
"""Deprecate a _renamed_ or _removed_ function argument.
The decorator assumes that the argument with the ``old_name`` was removed
from the function signature and the ``new_name`` replaced it at the
**same position** in the signature. If the ``old_name`` argument is
given when calling the decorated function the decorator will catch it and
issue a deprecation warning and pass it on as ``new_name`` argument.
Parameters
----------
old_name : str or list/tuple thereof
The old name of the argument.
new_name : str or list/tuple thereof or `None`
The new name of the argument. Set this to `None` to remove the
argument ``old_name`` instead of renaming it.
since : str or number or list/tuple thereof
The release at which the old argument became deprecated.
arg_in_kwargs : bool or list/tuple thereof, optional
If the argument is not a named argument (for example it
was meant to be consumed by ``**kwargs``) set this to
``True``. Otherwise the decorator will throw an Exception
if the ``new_name`` cannot be found in the signature of
the decorated function.
Default is ``False``.
relax : bool or list/tuple thereof, optional
If ``False`` a ``TypeError`` is raised if both ``new_name`` and
``old_name`` are given. If ``True`` the value for ``new_name`` is used
and a Warning is issued.
Default is ``False``.
pending : bool or list/tuple thereof, optional
If ``True`` this will hide the deprecation warning and ignore the
corresponding ``relax`` parameter value.
Default is ``False``.
warning_type : warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object if ``new_name`` is None. The deprecation
warning will tell the user about this alternative if provided.
Raises
------
TypeError
If the new argument name cannot be found in the function
signature and arg_in_kwargs was False or if it is used to
deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
At runtime such an Error is raised if both the new_name
and old_name were specified when calling the function and
"relax=False".
Notes
-----
The decorator should be applied to a function where the **name**
of an argument was changed but it applies the same logic.
.. warning::
If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
also be a list or tuple with the same number of entries. ``relax`` and
``arg_in_kwarg`` can be a single bool (applied to all) or also a
list/tuple with the same number of entries like ``new_name``, etc.
Examples
--------
The deprecation warnings are not shown in the following examples.
To deprecate a positional or keyword argument::
>>> from astropy.utils.decorators import deprecated_renamed_argument
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
... def test(sigma):
... return sigma
>>> test(2)
2
>>> test(sigma=2)
2
>>> test(sig=2)
2
To deprecate an argument caught inside the ``**kwargs`` the
``arg_in_kwargs`` has to be set::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
... arg_in_kwargs=True)
... def test(**kwargs):
... return kwargs['sigma']
>>> test(sigma=2)
2
>>> test(sig=2)
2
By default providing the new and old keyword will lead to an Exception. If
a Warning is desired set the ``relax`` argument::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
... def test(sigma):
... return sigma
>>> test(sig=2)
2
It is also possible to replace multiple arguments. The ``old_name``,
``new_name`` and ``since`` have to be `tuple` or `list` and contain the
same number of entries::
>>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
... ['1.0', 1.2])
... def test(alpha, beta):
... return alpha, beta
>>> test(a=2, b=3)
(2, 3)
In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
is applied to all renamed arguments) or must also be a `tuple` or `list`
with values for each of the arguments.
"""
cls_iter = (list, tuple)
if isinstance(old_name, cls_iter):
n = len(old_name)
# Assume that new_name and since are correct (tuple/list with the
# appropriate length) in the spirit of the "consenting adults". But the
# optional parameters may not be set, so if these are not iterables
# wrap them.
if not isinstance(arg_in_kwargs, cls_iter):
arg_in_kwargs = [arg_in_kwargs] * n
if not isinstance(relax, cls_iter):
relax = [relax] * n
if not isinstance(pending, cls_iter):
pending = [pending] * n
else:
# To allow a uniform approach later on, wrap all arguments in lists.
n = 1
old_name = [old_name]
new_name = [new_name]
since = [since]
arg_in_kwargs = [arg_in_kwargs]
relax = [relax]
pending = [pending]
def decorator(function):
# The named arguments of the function.
arguments = signature(function).parameters
keys = list(arguments.keys())
position = [None] * n
for i in range(n):
# Determine the position of the argument.
if arg_in_kwargs[i]:
pass
else:
if new_name[i] is None:
continue
elif new_name[i] in arguments:
param = arguments[new_name[i]]
# In case the argument is not found in the list of arguments
# the only remaining possibility is that it should be caught
# by some kind of **kwargs argument.
# This case has to be explicitly specified, otherwise throw
# an exception!
else:
raise TypeError('"{}" was not specified in the function '
'signature. If it was meant to be part of '
'"**kwargs" then set "arg_in_kwargs" to "True"'
'.'.format(new_name[i]))
# There are several possibilities now:
# 1.) Positional or keyword argument:
if param.kind == param.POSITIONAL_OR_KEYWORD:
position[i] = keys.index(new_name[i])
# 2.) Keyword only argument:
elif param.kind == param.KEYWORD_ONLY:
# These cannot be specified by position.
position[i] = None
# 3.) positional-only argument, varargs, varkwargs or some
# unknown type:
else:
raise TypeError('cannot replace argument "{0}" of kind '
'{1!r}.'.format(new_name[i], param.kind))
@functools.wraps(function)
def wrapper(*args, **kwargs):
for i in range(n):
# The only way to have oldkeyword inside the function is
# that it is passed as kwarg because the oldkeyword
# parameter was renamed to newkeyword.
if old_name[i] in kwargs:
value = kwargs.pop(old_name[i])
# Display the deprecation warning only when it's not
# pending.
if not pending[i]:
message = ('"{0}" was deprecated in version {1} '
'and will be removed in a future version. '
.format(old_name[i], since[i]))
if new_name[i] is not None:
message += ('Use argument "{}" instead.'
.format(new_name[i]))
elif alternative:
message += ('\n Use {} instead.'
.format(alternative))
warnings.warn(message, warning_type, stacklevel=2)
# Check if the newkeyword was given as well.
newarg_in_args = (position[i] is not None and
len(args) > position[i])
newarg_in_kwargs = new_name[i] in kwargs
if newarg_in_args or newarg_in_kwargs:
if not pending[i]:
                            # If both are given, print a Warning if relax is
                            # True or raise an Exception if relax is False.
if relax[i]:
warnings.warn(
'"{0}" and "{1}" keywords were set. '
'Using the value of "{1}".'
''.format(old_name[i], new_name[i]),
AstropyUserWarning)
else:
raise TypeError(
'cannot specify both "{}" and "{}"'
'.'.format(old_name[i], new_name[i]))
else:
# Pass the value of the old argument with the
# name of the new argument to the function
if new_name[i] is not None:
kwargs[new_name[i]] = value
return function(*args, **kwargs)
return wrapper
return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
"""
Similar to `property`, but allows class-level properties. That is,
a property whose getter is like a `classmethod`.
The wrapped method may explicitly use the `classmethod` decorator (which
    must come before this decorator), or the `classmethod` may be omitted
(it is implicit through use of this decorator).
.. note::
classproperty only works for *read-only* properties. It does not
currently allow writeable/deletable properties, due to subtleties of how
Python descriptors work. In order to implement such properties on a class
a metaclass for that class must be implemented.
Parameters
----------
fget : callable
The function that computes the value of this property (in particular,
the function when this is used as a decorator) a la `property`.
doc : str, optional
The docstring for the property--by default inherited from the getter
function.
lazy : bool, optional
If True, caches the value returned by the first call to the getter
function, so that it is only called once (used for lazy evaluation
of an attribute). This is analogous to `lazyproperty`. The ``lazy``
argument can also be used when `classproperty` is used as a decorator
(see the third example below). When used in the decorator syntax this
*must* be passed in as a keyword argument.
Examples
--------
::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal + 1
...
>>> Foo.bar
2
>>> foo_instance = Foo()
>>> foo_instance.bar
2
>>> foo_instance._bar_internal = 2
>>> foo_instance.bar # Ignores instance attributes
2
As previously noted, a `classproperty` is limited to implementing
read-only attributes::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal
... @bar.setter
... def bar(cls, value):
... cls._bar_internal = value
...
Traceback (most recent call last):
...
NotImplementedError: classproperty can only be read-only; use a
metaclass to implement modifiable class-level properties
When the ``lazy`` option is used, the getter is only called once::
>>> class Foo:
... @classproperty(lazy=True)
... def bar(cls):
... print("Performing complicated calculation")
... return 1
...
>>> Foo.bar
Performing complicated calculation
1
>>> Foo.bar
1
If a subclass inherits a lazy `classproperty` the property is still
re-evaluated for the subclass::
>>> class FooSub(Foo):
... pass
...
>>> FooSub.bar
Performing complicated calculation
1
>>> FooSub.bar
1
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
if self._lazy and objtype in self._cache:
return self._cache[objtype]
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
if self._lazy:
self._cache[objtype] = val
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
class lazyproperty(property):
"""
Works similarly to property(), but computes the value only once.
    This essentially memoizes the value of the property by storing the result
of its computation in the ``__dict__`` of the object instance. This is
useful for computing the value of some property that should otherwise be
invariant. For example::
>>> class LazyTest:
... @lazyproperty
... def complicated_property(self):
... print('Computing the value for complicated_property...')
... return 42
...
>>> lt = LazyTest()
>>> lt.complicated_property
Computing the value for complicated_property...
42
>>> lt.complicated_property
42
As the example shows, the second time ``complicated_property`` is accessed,
the ``print`` statement is not executed. Only the return value from the
    first access of ``complicated_property`` is returned.
By default, a setter and deleter are used which simply overwrite and
delete, respectively, the value stored in ``__dict__``. Any user-specified
setter or deleter is executed before executing these default actions.
    The one exception is that the default setter is not run if the user setter
    already sets the new value in ``__dict__`` and returns that value (and the
    returned value is not ``None``).
Adapted from the recipe at
http://code.activestate.com/recipes/363602-lazy-property-evaluation
"""
def __init__(self, fget, fset=None, fdel=None, doc=None):
super().__init__(fget, fset, fdel, doc)
self._key = self.fget.__name__
def __get__(self, obj, owner=None):
try:
val = obj.__dict__.get(self._key, _NotFound)
if val is not _NotFound:
return val
else:
val = self.fget(obj)
obj.__dict__[self._key] = val
return val
except AttributeError:
if obj is None:
return self
raise
def __set__(self, obj, val):
obj_dict = obj.__dict__
if self.fset:
ret = self.fset(obj, val)
if ret is not None and obj_dict.get(self._key) is ret:
# By returning the value set the setter signals that it took
# over setting the value in obj.__dict__; this mechanism allows
# it to override the input value
return
obj_dict[self._key] = val
def __delete__(self, obj):
if self.fdel:
self.fdel(obj)
if self._key in obj.__dict__:
del obj.__dict__[self._key]
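# A minimal sketch (hedged) of the setter-override mechanism described above:
# by storing a value in ``__dict__`` and returning that same (non-None) object,
# a user setter signals that the default "store the input" step can be skipped.
#
#     class Angle:
#         @lazyproperty
#         def degrees(self):
#             return 0.0
#
#         @degrees.setter
#         def degrees(self, value):
#             value = float(value) % 360.0       # normalize the input
#             self.__dict__['degrees'] = value   # take over the storage
#             return value                       # non-None -> default skipped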
class sharedmethod(classmethod):
"""
This is a method decorator that allows both an instancemethod and a
`classmethod` to share the same name.
When using `sharedmethod` on a method defined in a class's body, it
may be called on an instance, or on a class. In the former case it
behaves like a normal instance method (a reference to the instance is
automatically passed as the first ``self`` argument of the method)::
>>> class Example:
... @sharedmethod
... def identify(self, *args):
... print('self was', self)
... print('additional args were', args)
...
>>> ex = Example()
>>> ex.identify(1, 2)
self was <astropy.utils.decorators.Example object at 0x...>
additional args were (1, 2)
In the latter case, when the `sharedmethod` is called directly from a
class, it behaves like a `classmethod`::
>>> Example.identify(3, 4)
self was <class 'astropy.utils.decorators.Example'>
additional args were (3, 4)
This also supports a more advanced usage, where the `classmethod`
implementation can be written separately. If the class's *metaclass*
has a method of the same name as the `sharedmethod`, the version on
the metaclass is delegated to::
>>> class ExampleMeta(type):
... def identify(self):
... print('this implements the {0}.identify '
... 'classmethod'.format(self.__name__))
...
>>> class Example(metaclass=ExampleMeta):
... @sharedmethod
... def identify(self):
... print('this implements the instancemethod')
...
>>> Example().identify()
this implements the instancemethod
>>> Example.identify()
this implements the Example.identify classmethod
"""
def __get__(self, obj, objtype=None):
if obj is None:
mcls = type(objtype)
clsmeth = getattr(mcls, self.__func__.__name__, None)
if callable(clsmeth):
func = clsmeth
else:
func = self.__func__
return self._make_method(func, objtype)
else:
return self._make_method(self.__func__, obj)
@staticmethod
def _make_method(func, instance):
return types.MethodType(func, instance)
def format_doc(docstring, *args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
        string itself. One special case is if the string is ``None``; then
        it will use the decorated function's docstring and format it.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
If a placeholder in the (interpreted) ``docstring`` was not filled. see
:meth:`str.format` for more information.
Notes
-----
    Using this decorator allows, for example, Sphinx to parse the
    correct docstring.
Examples
--------
Replacing the current docstring is very easy::
>>> from astropy.utils.decorators import format_doc
>>> @format_doc('''Perform num1 + num2''')
... def add(num1, num2):
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform num1 + num2
sometimes instead of replacing you only want to add to it::
>>> doc = '''
... {__doc__}
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... '''
>>> @format_doc(doc)
... def add(num1, num2):
... '''Perform addition.'''
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
in case one might want to format it further::
>>> doc = '''
... Perform {0}.
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... result of num1 {op} num2
... {__doc__}
... '''
>>> @format_doc(doc, 'addition', op='+')
... def add(num1, num2):
... return num1+num2
...
>>> @format_doc(doc, 'subtraction', op='-')
... def subtract(num1, num2):
... '''Notes: This one has additional notes.'''
... return num1-num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
>>> help(subtract) # doctest: +SKIP
Help on function subtract in module __main__:
<BLANKLINE>
subtract(num1, num2)
Perform subtraction.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 - num2
Notes : This one has additional notes.
These methods can be combined; even taking the docstring from another
object is possible, using that object's docstring attribute. You just
have to specify the object::
>>> @format_doc(add)
... def another_add(num1, num2):
... return num1 + num2
...
>>> help(another_add) # doctest: +SKIP
Help on function another_add in module __main__:
<BLANKLINE>
another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
But be aware that this decorator *only* formats the given docstring, not
the strings passed as ``args`` or ``kwargs`` (not even the original
docstring)::
>>> @format_doc(doc, 'addition', op='+')
... def yet_another_add(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(yet_another_add) # doctest: +SKIP
Help on function yet_another_add in module __main__:
<BLANKLINE>
yet_another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
This one is good for {0}.
To work around it you could specify the docstring to be ``None``::
>>> @format_doc(None, 'addition')
... def last_add_i_swear(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(last_add_i_swear) # doctest: +SKIP
Help on function last_add_i_swear in module __main__:
<BLANKLINE>
last_add_i_swear(num1, num2)
This one is good for addition.
Using it with ``None`` as the docstring allows one to use the decorator twice
on an object to first parse the new docstring and then to parse the
original docstring or the ``args`` and ``kwargs``.
"""
def set_docstring(obj):
if docstring is None:
# None means: use the object's __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
raise ValueError('docstring must be a non-empty string or an '
'object with a non-empty docstring.')
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs['__doc__'] = obj.__doc__ or ''
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|
31bf1c6a95300621689208585bc1d703926729b8eb46f1b39eb9bce38f8a1065 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""
import codecs
import locale
import re
import math
import multiprocessing
import os
import struct
import sys
import threading
import time
try:
import fcntl
import termios
import signal
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
from astropy import conf
from .misc import isiterable
from .decorators import classproperty
__all__ = [
'isatty', 'color_print', 'human_time', 'human_file_size',
'ProgressBar', 'Spinner', 'print_code_line', 'ProgressBarOrSpinner',
'terminal_size']
_DEFAULT_ENCODING = 'utf-8'
class _IPython:
"""Singleton class given access to IPython streams, etc."""
@classproperty
def get_ipython(cls):
try:
from IPython import get_ipython
except ImportError:
pass
return get_ipython
@classproperty
def OutStream(cls):
if not hasattr(cls, '_OutStream'):
cls._OutStream = None
try:
cls.get_ipython()
except NameError:
return None
try:
from ipykernel.iostream import OutStream
except ImportError:
try:
from IPython.zmq.iostream import OutStream
except ImportError:
from IPython import version_info
if version_info[0] >= 4:
return None
try:
from IPython.kernel.zmq.iostream import OutStream
except ImportError:
return None
cls._OutStream = OutStream
return cls._OutStream
@classproperty
def ipyio(cls):
if not hasattr(cls, '_ipyio'):
try:
from IPython.utils import io
except ImportError:
cls._ipyio = None
else:
cls._ipyio = io
return cls._ipyio
@classproperty
def IOStream(cls):
if cls.ipyio is None:
return None
else:
return cls.ipyio.IOStream
@classmethod
def get_stream(cls, stream):
return getattr(cls.ipyio, stream)
def _get_stdout(stderr=False):
"""
This utility function contains the logic to determine what streams to use
by default for standard out/err.
Typically this will just return `sys.stdout`, but it contains additional
logic for use in IPython on Windows to determine the correct stream to use
(usually ``IPython.utils.io.stdout`` but only if sys.stdout is a tty).
"""
if stderr:
stream = 'stderr'
else:
stream = 'stdout'
sys_stream = getattr(sys, stream)
if not isatty(sys_stream) or _IPython.OutStream is None:
return sys_stream
# Our system stream is an atty and we're in ipython.
ipyio_stream = _IPython.get_stream(stream)
if ipyio_stream is not None and isatty(ipyio_stream):
# Use the IPython console output stream
return ipyio_stream
else:
# sys.stdout was set to some other non-TTY stream (a file perhaps)
# so just use it directly
return sys_stream
def isatty(file):
"""
Returns `True` if ``file`` is a tty.
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not, so this assumes those are not
ttys.
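For example, a minimal sketch (the result depends on the running
environment, hence the skipped doctest):
>>> import sys
>>> isatty(sys.stdout)  # doctest: +SKIP
True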
"""
if (multiprocessing.current_process().name != 'MainProcess' or
threading.current_thread().getName() != 'MainThread'):
return False
if hasattr(file, 'isatty'):
return file.isatty()
# Use two isinstance calls to only evaluate IOStream when necessary.
if (_IPython.OutStream is None or
(not isinstance(file, _IPython.OutStream) and
not isinstance(file, _IPython.IOStream))):
return False
# File is an IPython OutStream or IOStream. Check whether:
# - File name is 'stdout'; or
# - File wraps a Console
if getattr(file, 'name', None) == 'stdout':
return True
if hasattr(file, 'stream'):
# On Windows, in IPython 2 the standard I/O streams will wrap
# pyreadline.Console objects if pyreadline is available; this should
# be considered a TTY.
try:
from pyreadline.console import Console as PyreadlineConsole
except ImportError:
return False
return isinstance(file.stream, PyreadlineConsole)
return False
def terminal_size(file=None):
"""
Returns a tuple (height, width) containing the height and width of
the terminal.
This function will look for the width and height in multiple places
before falling back on the width and height in astropy's
configuration.
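A minimal usage sketch (the values depend on the running terminal,
hence the skipped doctest):
>>> lines, width = terminal_size()  # doctest: +SKIP
>>> (lines, width)  # doctest: +SKIP
(24, 80)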
"""
if file is None:
file = _get_stdout()
try:
s = struct.pack(str("HHHH"), 0, 0, 0, 0)
x = fcntl.ioctl(file, termios.TIOCGWINSZ, s)
(lines, width, xpixels, ypixels) = struct.unpack(str("HHHH"), x)
if lines > 12:
lines -= 6
if width > 10:
width -= 1
if lines <= 0 or width <= 0:
raise Exception('unable to get terminal size')
return (lines, width)
except Exception:
try:
# see if POSIX standard variables will work
return (int(os.environ.get('LINES')),
int(os.environ.get('COLUMNS')))
except TypeError:
# fall back on configuration variables, or if not
# set, (25, 80)
lines = conf.max_lines
width = conf.max_width
if lines is None:
lines = 25
if width is None:
width = 80
return lines, width
def _color_text(text, color):
"""
Returns a string wrapped in ANSI color codes for coloring the
text in a terminal::
colored_text = color_text('Here is a message', 'blue')
This won't actually affect the text until it is printed to the
terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
'black': '0;30',
'red': '0;31',
'green': '0;32',
'brown': '0;33',
'blue': '0;34',
'magenta': '0;35',
'cyan': '0;36',
'lightgrey': '0;37',
'default': '0;39',
'darkgrey': '1;30',
'lightred': '1;31',
'lightgreen': '1;32',
'yellow': '1;33',
'lightblue': '1;34',
'lightmagenta': '1;35',
'lightcyan': '1;36',
'white': '1;37'}
if sys.platform == 'win32' and _IPython.OutStream is None:
# On Windows do not colorize text unless in IPython
return text
color_code = color_mapping.get(color, '0;39')
return '\033[{0}m{1}\033[0m'.format(color_code, text)
def _decode_preferred_encoding(s):
"""Decode the supplied byte string using the preferred encoding
for the locale (`locale.getpreferredencoding`) or, if the default encoding
is invalid, fall back first on utf-8, then on latin-1 if the message cannot
be decoded with utf-8.
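A minimal sketch of the intended behavior (result shown for a UTF-8
locale, hence the skipped doctest):
>>> _decode_preferred_encoding(b'abc')  # doctest: +SKIP
'abc'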
"""
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = _DEFAULT_ENCODING
return s.decode(enc)
except UnicodeDecodeError:
return s.decode('latin-1')
def _write_with_fallback(s, write, fileobj):
"""Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or
'latin-1'.
"""
if (_IPython.IOStream is not None and
isinstance(fileobj, _IPython.IOStream)):
# If the output stream is an IPython.utils.io.IOStream object that's
# not going to be very helpful to us since it doesn't raise any
# exceptions when an error occurs writing to its underlying stream.
# There's no advantage to us using IOStream.write directly though;
# instead just write directly to its underlying stream:
write = fileobj.stream.write
try:
write(s)
return write
except UnicodeEncodeError:
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter('latin-1')
f = Writer(fileobj)
write = f.write
# If this doesn't work let the exception bubble up; I'm out of ideas
write(s)
return write
def color_print(*args, end='\n', **kwargs):
"""
Prints colors and styles to the terminal using ANSI escape
sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writeable file-like object, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get('file', _get_stdout())
write = file.write
if isatty(file) and conf.use_color:
for i in range(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ''
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
# Some file objects support writing unicode sensibly on some Python
# versions; if this fails try creating a writer using the locale's
# preferred encoding. If that fails too give up.
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in range(0, len(args), 2):
msg = args[i]
write(msg)
write(end)
def strip_ansi_codes(s):
"""
Remove ANSI color codes from the string.
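For example, using escape sequences like those produced by
``_color_text`` (skipped in doctests to avoid raw escape characters):
>>> strip_ansi_codes('\033[0;34mhello\033[0m')  # doctest: +SKIP
'hello'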
"""
return re.sub('\033\\[([0-9]+)(;[0-9]+)*m', '', s)
def human_time(seconds):
"""
Returns a human-friendly time string that is always exactly 6
characters long.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds
that is always exactly 6 characters.
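For example (a minimal sketch; skipped in doctests since exact
spacing is an implementation detail):
>>> human_time(90)  # doctest: +SKIP
' 1m30s'
>>> human_time(93600)  # doctest: +SKIP
' 1d 2h'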
"""
units = [
('y', 60 * 60 * 24 * 7 * 52),
('w', 60 * 60 * 24 * 7),
('d', 60 * 60 * 24),
('h', 60 * 60),
('m', 60),
('s', 1),
]
seconds = int(seconds)
if seconds < 60:
return ' {0:2d}s'.format(seconds)
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return '{0:2d}{1}{2:2d}{3}'.format(
seconds // limit1, unit1,
(seconds % limit1) // limit2, unit2)
return ' ~inf'
def human_file_size(size):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
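For example (a minimal sketch; skipped in doctests since exact
rounding is an implementation detail):
>>> human_file_size(1300)  # doctest: +SKIP
'1.3k'
>>> human_file_size(123456789)  # doctest: +SKIP
'123M'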
"""
if hasattr(size, 'unit'):
# Import units only if necessary because the import takes a
# significant time [#4649]
from astropy import units as u
size = u.Quantity(size, u.byte).value
suffixes = ' kMGTPEZY'
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 8:  # beyond 'Y' (yotta) there is no suffix available
suffix = '?'
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = size / num_scale
str_value = str(value)
if suffix == ' ':
str_value = str_value[:str_value.index('.')]
elif str_value[2] == '.':
str_value = str_value[:2]
else:
str_value = str_value[:3]
return "{0:>3s}{1}".format(str_value, suffix)
class _mapfunc(object):
"""
A function wrapper to support ProgressBar.map().
"""
def __init__(self, func):
self._func = func
def __call__(self, i_arg):
i, arg = i_arg
return i, self._func(arg)
class ProgressBar:
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
if file is None:
file = _get_stdout()
if not ipython_widget and not isatty(file):
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isiterable(total_or_items):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not ipython_widget:
self._should_handle_resize = (
_CAN_RESIZE_TERMINAL and self._file.isatty())
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
self.update(0)
def _handle_resize(self, signum=None, frame=None):
terminal_width = terminal_size(self._file)[1]
self._bar_length = terminal_width - 37
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write('\n')
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def __next__(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update progress bar via the console or notebook accordingly.
"""
# Update self.value
if value is None:
value = self._current_value + 1
self._current_value = value
# Choose the appropriate environment
if self._ipython_widget:
self._update_ipython_widget(value)
else:
self._update_console(value)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write('\r|')
color_print('=' * bar_fill, 'blue', file=file, end='')
if bar_fill < self._bar_length:
color_print('>', 'green', file=file, end='')
write('-' * (self._bar_length - bar_fill - 1))
write('|')
if value >= self._total:
t = time.time() - self._start_time
prefix = ' '
elif value <= 0:
t = None
prefix = ''
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = ' ETA '
write(' {0:>4s}/{1:>4s}'.format(
human_file_size(value),
self._human_total))
write(' ({:>6.2%})'.format(frac))
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
def _update_ipython_widget(self, value=None):
"""
Update the progress bar to the given value (out of a total
given to the constructor).
This method is for use in the IPython notebook 2+.
"""
# Create and display an empty progress bar widget,
# if none exists.
if not hasattr(self, '_widget'):
# Import only if an IPython widget, i.e., widget in IPython notebook
from IPython import version_info
if version_info[0] < 4:
from IPython.html import widgets
self._widget = widgets.FloatProgressWidget()
else:
_IPython.get_ipython()
from ipywidgets import widgets
self._widget = widgets.FloatProgress()
from IPython.display import display
display(self._widget)
self._widget.value = 0
# Calculate percent completion, and update progress bar
frac = (value/self._total)
self._widget.value = frac * 100
self._widget.description = ' ({:>6.2%})'.format(frac)
def _silent_update(self, value=None):
pass
@classmethod
def map(cls, function, items, multiprocess=False, file=None, step=100,
ipython_widget=False):
"""
Does a `map` operation while displaying a progress bar with
percentage complete. The map operation may run on arbitrary order
on the items, but the results are returned in sequential order.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writeable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
"""
if multiprocess:
function = _mapfunc(function)
items = list(enumerate(items))
results = cls.map_unordered(function, items, multiprocess=multiprocess,
file=file, step=step,
ipython_widget=ipython_widget)
if multiprocess:
_, results = zip(*sorted(results))
results = list(results)
return results
@classmethod
def map_unordered(cls, function, items, multiprocess=False, file=None,
step=100, ipython_widget=False):
"""
Does a `map` operation while displaying a progress bar with
percentage complete. The map operation may run in arbitrary order
on the items, and the results may be returned in arbitrary order.
::
def work(i):
print(i)
ProgressBar.map_unordered(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writeable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
"""
results = []
if file is None:
file = _get_stdout()
with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
if bar._ipython_widget:
chunksize = step
else:
default_step = max(int(float(len(items)) / bar._bar_length), 1)
chunksize = min(default_step, step)
if not multiprocess or multiprocess < 1:
for i, item in enumerate(items):
results.append(function(item))
if (i % chunksize) == 0:
bar.update(i)
else:
p = multiprocessing.Pool(
processes=(int(multiprocess) if multiprocess is not True else None))
for i, result in enumerate(
p.imap_unordered(function, items, chunksize=chunksize)):
bar.update(i)
results.append(result)
p.close()
p.join()
return results
class Spinner:
"""
A class to display a spinner in the terminal.
It is designed to be used with the ``with`` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.next()
"""
_default_unicode_chars = "◓◑◒◐"
_default_ascii_chars = "-/|\\"
def __init__(self, msg, color='default', file=None, step=1,
chars=None):
"""
Parameters
----------
msg : str
The message to print
color : str, optional
An ANSI terminal color name. Must be one of: black, red,
green, brown, blue, magenta, cyan, lightgrey, default,
darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white.
file : writeable file-like object, optional
The file to write the spinner to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the spinner will be
completely silent.
step : int, optional
Only update the spinner every *step* steps
chars : str, optional
The character sequence to use for the spinner
"""
if file is None:
file = _get_stdout()
self._msg = msg
self._color = color
self._file = file
self._step = step
if chars is None:
if conf.unicode_output:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not isatty(file)
def _iterator(self):
chars = self._chars
index = 0
file = self._file
write = file.write
flush = file.flush
try_fallback = True
while True:
write('\r')
color_print(self._msg, self._color, file=file, end='')
write(' ')
try:
if try_fallback:
write = _write_with_fallback(chars[index], write, file)
else:
write(chars[index])
except UnicodeError:
# If even _write_with_fallback failed for any reason just give
# up on trying to use the unicode characters
chars = self._default_ascii_chars
write(chars[index])
try_fallback = False # No good will come of using this again
flush()
yield
for i in range(self._step):
yield
index = (index + 1) % len(chars)
def __enter__(self):
if self._silent:
return self._silent_iterator()
else:
return self._iterator()
def __exit__(self, exc_type, exc_value, traceback):
file = self._file
write = file.write
flush = file.flush
if not self._silent:
write('\r')
color_print(self._msg, self._color, file=file, end='')
if exc_type is None:
color_print(' [Done]', 'green', file=file)
else:
color_print(' [Failed]', 'red', file=file)
flush()
def _silent_iterator(self):
color_print(self._msg, self._color, file=self._file, end='')
self._file.flush()
while True:
yield
class ProgressBarOrSpinner:
"""
A class that displays either a `ProgressBar` or `Spinner`
depending on whether the total size of the operation is
known or not.
It is designed to be used with the ``with`` statement::
if file.has_length():
length = file.get_length()
else:
length = None
bytes_read = 0
with ProgressBarOrSpinner(length) as bar:
while file.read(blocksize):
bytes_read += blocksize
bar.update(bytes_read)
"""
def __init__(self, total, msg, color='default', file=None):
"""
Parameters
----------
total : int or None
If an int, the number of increments in the process being
tracked and a `ProgressBar` is displayed. If `None`, a
`Spinner` is displayed.
msg : str
The message to display above the `ProgressBar` or
alongside the `Spinner`.
color : str, optional
The color of ``msg``, if any. Must be an ANSI terminal
color name. Must be one of: black, red, green, brown,
blue, magenta, cyan, lightgrey, default, darkgrey,
lightred, lightgreen, yellow, lightblue, lightmagenta,
lightcyan, white.
file : writable file-like object, optional
The file to write to. Defaults to `sys.stdout`. If
``file`` is not a tty (as determined by calling its `isatty`
member, if any), only ``msg`` will be displayed: the
`ProgressBar` or `Spinner` will be silent.
"""
if file is None:
file = _get_stdout()
if total is None or not isatty(file):
self._is_spinner = True
self._obj = Spinner(msg, color=color, file=file)
else:
self._is_spinner = False
color_print(msg, color, file=file)
self._obj = ProgressBar(total, file=file)
def __enter__(self):
self._iter = self._obj.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._obj.__exit__(exc_type, exc_value, traceback)
def update(self, value):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._is_spinner:
next(self._iter)
else:
self._obj.update(value)
def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
"""
Prints a line of source code, highlighting a particular character
position in the line. Useful for displaying the context of error
messages.
If the line is more than ``width`` characters, the line is truncated
accordingly and '…' characters are inserted at the front and/or
end.
It looks like this::
there_is_a_syntax_error_here :
^
Parameters
----------
line : unicode
The line of code to display
col : int, optional
The character in the line to highlight. ``col`` must be less
than ``len(line)``.
file : writeable file-like object, optional
Where to write to. Defaults to `sys.stdout`.
tabwidth : int, optional
The number of spaces per tab (``'\\t'``) character. Default
is 8. All tabs will be converted to spaces to ensure that the
caret lines up with the correct column.
width : int, optional
The width of the display, beyond which the line will be
truncated. Defaults to 70 (this matches the default in the
standard library's `textwrap` module).
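A minimal usage sketch matching the illustration above (output is
written to ``file``, hence the skipped doctest):
>>> print_code_line('there_is_a_syntax_error_here :', col=4)  # doctest: +SKIP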
"""
if file is None:
file = _get_stdout()
if conf.unicode_output:
ellipsis = '…'
else:
ellipsis = '...'
write = file.write
if col is not None:
if col >= len(line):
raise ValueError('col must be less than the line length.')
ntabs = line[:col].count('\t')
col += ntabs * (tabwidth - 1)
line = line.rstrip('\n')
line = line.replace('\t', ' ' * tabwidth)
if col is not None and col > width:
new_col = min(width // 2, len(line) - col)
offset = col - new_col
line = line[offset + len(ellipsis):]
width -= len(ellipsis)
new_col = col
col -= offset
color_print(ellipsis, 'darkgrey', file=file, end='')
if len(line) > width:
write(line[:width - len(ellipsis)])
color_print(ellipsis, 'darkgrey', file=file)
else:
write(line)
write('\n')
if col is not None:
write(' ' * col)
color_print('^', 'red', file=file)
# The following four Getch* classes implement unbuffered character reading from
# stdin on Windows, Linux, and Mac OS X. This is taken directly from ActiveState
# Code Recipes:
# http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
#
class Getch:
"""Get a single character from standard input without screen echo.
Returns
-------
char : str (one character)
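A minimal usage sketch (requires an interactive terminal, hence the
skipped doctests):
>>> getch = Getch()  # doctest: +SKIP
>>> key = getch()  # doctest: +SKIP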
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except (ImportError, AttributeError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
import tty # pylint: disable=W0611
import sys # pylint: disable=W0611
# import termios now or else you'll get the Unix
# version on the Mac
import termios # pylint: disable=W0611
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt # pylint: disable=W0611
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt # see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
|
7c0ad779eb98c21eb310c362c7b5d96323f79d4b2d38d335fb6fb6122219d4a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""General purpose timer related functions."""
# STDLIB
import time
import warnings
from collections import OrderedDict
from collections.abc import Iterable
from functools import partial, wraps
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy import units as u
from astropy import log
from astropy import modeling
from .exceptions import AstropyUserWarning
__all__ = ['timefunc', 'RunTimePredictor']
__doctest_skip__ = ['timefunc']
def timefunc(num_tries=1, verbose=True):
"""Decorator to time a function or method.
Parameters
----------
num_tries : int, optional
Number of calls to make. Timer will take the
average run time.
verbose : bool, optional
Extra log information.
Returns
-------
tt : float
Average run time in seconds.
result
Output(s) from the function.
Examples
--------
To add a timer that times `numpy.log` over 100 calls with
verbose output::
import numpy as np
from astropy.utils.timer import timefunc
@timefunc(100)
def timed_log(x):
return np.log(x)
To run the decorated function above:
>>> t, y = timed_log(100)
INFO: timed_log took 9.29832458496e-06 s on AVERAGE for 100 call(s). [...]
>>> t
9.298324584960938e-06
>>> y
4.6051701859880918
"""
def real_decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
ts = time.time()
for i in range(num_tries):
result = function(*args, **kwargs)
te = time.time()
tt = (te - ts) / num_tries
if verbose: # pragma: no cover
log.info('{0} took {1} s on AVERAGE for {2} call(s).'.format(
function.__name__, tt, num_tries))
return tt, result
return wrapper
return real_decorator
class RunTimePredictor:
"""Class to predict run time.
.. note:: Only predicts for a single varying numeric input parameter.
Parameters
----------
func : function
Function to time.
args : tuple
Fixed positional argument(s) for the function.
kwargs : dict
Fixed keyword argument(s) for the function.
Examples
--------
>>> from astropy.utils.timer import RunTimePredictor
Set up a predictor for :math:`10^{x}`:
>>> p = RunTimePredictor(pow, 10)
Give it baseline data to use for prediction and
get the function output values:
>>> p.time_func(range(10, 1000, 200))
>>> for input, result in sorted(p.results.items()):
... print("pow(10, {0})\\n{1}".format(input, result))
pow(10, 10)
10000000000
pow(10, 210)
10000000000...
pow(10, 410)
10000000000...
pow(10, 610)
10000000000...
pow(10, 810)
10000000000...
Fit a straight line assuming :math:`\\text{arg}^{1}` relationship
(coefficients are returned):
>>> p.do_fit() # doctest: +SKIP
array([1.16777420e-05, 1.00135803e-08])
Predict run time for :math:`10^{5000}`:
>>> p.predict_time(5000) # doctest: +SKIP
6.174564361572262e-05
Plot the prediction:
>>> p.plot(xlabeltext='Power of 10') # doctest: +SKIP
.. image:: /_static/timer_prediction_pow10.png
:width: 450px
:alt: Example plot from `astropy.utils.timer.RunTimePredictor`
When the changing argument is not the last, e.g.,
:math:`x^{2}`, something like this might work:
>>> p = RunTimePredictor(lambda x: pow(x, 2))
>>> p.time_func([2, 3, 5])
>>> sorted(p.results.items())
[(2, 4), (3, 9), (5, 25)]
"""
def __init__(self, func, *args, **kwargs):
self._funcname = func.__name__
self._pfunc = partial(func, *args, **kwargs)
self._cache_good = OrderedDict()
self._cache_bad = []
self._cache_est = OrderedDict()
self._cache_out = OrderedDict()
self._fit_func = None
self._power = None
@property
def results(self):
"""Function outputs from `time_func`.
A dictionary mapping input arguments (fixed arguments
are not included) to their respective output values.
"""
return self._cache_out
@timefunc(num_tries=1, verbose=False)
def _timed_pfunc(self, arg):
"""Run partial func once for single arg and time it."""
return self._pfunc(arg)
def _cache_time(self, arg):
"""Cache timing results without repetition."""
if arg not in self._cache_good and arg not in self._cache_bad:
try:
result = self._timed_pfunc(arg)
except Exception as e:
warnings.warn(str(e), AstropyUserWarning)
self._cache_bad.append(arg)
else:
self._cache_good[arg] = result[0] # Run time
self._cache_out[arg] = result[1] # Function output
def time_func(self, arglist):
"""Time the partial function for a list of single args
and store run time in a cache. This forms a baseline for
the prediction.
This also stores function outputs in `results`.
Parameters
----------
arglist : list of numbers
List of input arguments to time.
"""
if not isinstance(arglist, Iterable):
arglist = [arglist]
# Preserve arglist order
for arg in arglist:
self._cache_time(arg)
# FUTURE: Implement N^x * O(log(N)) fancy fitting.
def do_fit(self, model=None, fitter=None, power=1, min_datapoints=3):
"""Fit a function to the lists of arguments and
their respective run time in the cache.
By default, this does a linear least-square fitting
to a straight line on run time w.r.t. argument values
raised to the given power, and returns the optimal
intercept and slope.
Parameters
----------
model : `astropy.modeling.Model`
Model for the expected trend of run time (Y-axis)
w.r.t. :math:`\\text{arg}^{\\text{power}}` (X-axis).
If `None`, will use `~astropy.modeling.polynomial.Polynomial1D`
with ``degree=1``.
fitter : `astropy.modeling.fitting.Fitter`
Fitter for the given model to extract optimal coefficient values.
If `None`, will use `~astropy.modeling.fitting.LinearLSQFitter`.
power : int, optional
Power of values to fit.
min_datapoints : int, optional
Minimum number of data points required for fitting.
They can be built up with `time_func`.
Returns
-------
a : array-like
Fitted `~astropy.modeling.FittableModel` parameters.
Raises
------
ValueError
Insufficient data points for fitting.
ModelsError
Invalid model or fitter.
"""
# Reset related attributes
self._power = power
self._cache_est = OrderedDict()
x_arr = np.array(list(self._cache_good.keys()))
if x_arr.size < min_datapoints:
raise ValueError('requires {0} points but has {1}'.format(
min_datapoints, x_arr.size))
if model is None:
model = modeling.models.Polynomial1D(1)
elif not isinstance(model, modeling.core.Model):
raise modeling.fitting.ModelsError(
'{0} is not a model.'.format(model))
if fitter is None:
fitter = modeling.fitting.LinearLSQFitter()
elif not isinstance(fitter, modeling.fitting.Fitter):
raise modeling.fitting.ModelsError(
'{0} is not a fitter.'.format(fitter))
self._fit_func = fitter(
model, x_arr**power, list(self._cache_good.values()))
return self._fit_func.parameters
def predict_time(self, arg):
"""Predict run time for given argument.
If prediction is already cached, cached value is returned.
Parameters
----------
arg : number
Input argument to predict run time for.
Returns
-------
t_est : float
Estimated run time for given argument.
Raises
------
RuntimeError
No fitted data for prediction.
"""
if arg in self._cache_est:
t_est = self._cache_est[arg]
else:
if self._fit_func is None:
raise RuntimeError('no fitted data for prediction')
t_est = self._fit_func(arg**self._power)
self._cache_est[arg] = t_est
return t_est
def plot(self, xscale='linear', yscale='linear', xlabeltext='args',
save_as=''): # pragma: no cover
"""Plot prediction.
.. note:: Uses `matplotlib <http://matplotlib.org/>`_.
Parameters
----------
xscale, yscale : {'linear', 'log', 'symlog'}
Scaling for `matplotlib.axes.Axes`.
xlabeltext : str, optional
Text for X-label.
save_as : str, optional
Save plot as given filename.
Raises
------
RuntimeError
Insufficient data for plotting.
"""
import matplotlib.pyplot as plt
# Actual data
x_arr = sorted(self._cache_good)
y_arr = np.array([self._cache_good[x] for x in x_arr])
if len(x_arr) <= 1:
raise RuntimeError('insufficient data for plotting')
# Auto-ranging
qmean = y_arr.mean() * u.second
for cur_u in (u.minute, u.second, u.millisecond, u.microsecond,
u.nanosecond):
val = qmean.to_value(cur_u)
if 1000 > val >= 1:
break
y_arr = (y_arr * u.second).to_value(cur_u)
fig, ax = plt.subplots()
ax.plot(x_arr, y_arr, 'kx-', label='Actual')
# Fitted data
if self._fit_func is not None:
x_est = list(self._cache_est.keys())
y_est = (np.array(list(self._cache_est.values())) *
u.second).to_value(cur_u)
ax.scatter(x_est, y_est, marker='o', c='r', label='Predicted')
x_fit = np.array(sorted(x_arr + x_est))
y_fit = (self._fit_func(x_fit**self._power) *
u.second).to_value(cur_u)
ax.plot(x_fit, y_fit, 'b--', label='Fit')
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.set_xlabel(xlabeltext)
ax.set_ylabel('Run time ({})'.format(cur_u.to_string()))
ax.set_title(self._funcname)
ax.legend(loc='best', numpoints=1)
plt.draw()
if save_as:
plt.savefig(save_as)
|
04e1fc6c9195cb0a4f499f7d463271bd609b9ee30029b10e6f7bdf8f349a5e23 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper functions and classes for handling metadata.
"""
from functools import wraps
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import dtype_bytes_or_chars
__all__ = ['MergeConflictError', 'MergeConflictWarning', 'MERGE_STRATEGIES',
'common_dtype', 'MergePlus', 'MergeNpConcatenate', 'MergeStrategy',
'MergeStrategyMeta', 'enable_merge_strategies', 'merge', 'MetaData']
class MergeConflictError(TypeError):
pass
class MergeConflictWarning(AstropyWarning):
pass
MERGE_STRATEGIES = []
def common_dtype(arrs):
"""
Use numpy to find the common dtype for a list of ndarrays.
Only allow arrays within the following fundamental numpy data types:
``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void``
Parameters
----------
arrs : list of ndarray objects
Arrays for which to find the common dtype
Returns
-------
dtype_str : str
String representation of dtype (dtype ``str`` attribute)
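Examples
--------
A minimal sketch (the byte-order prefix depends on the platform,
hence the skipped doctest):
>>> import numpy as np
>>> common_dtype([np.array([1, 2]), np.array([3.0])])  # doctest: +SKIP
'<f8'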
"""
def dtype(arr):
return getattr(arr, 'dtype', np.dtype('O'))
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = set(tuple(issubclass(dtype(arr).type, np_type) for np_type in np_types)
for arr in arrs)
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [dtype(arr).name for arr in arrs]
tme = MergeConflictError('Arrays have incompatible types {0}'
.format(incompat_types))
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs]
# For string-type arrays we need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for i, arr in enumerate(arrs):
if arr.dtype.kind in ('S', 'U'):
arrs[i] = [(u'0' if arr.dtype.kind == 'U' else b'0') *
dtype_bytes_or_chars(arr.dtype)]
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
class MergeStrategyMeta(type):
"""
Metaclass that registers MergeStrategy subclasses into the
MERGE_STRATEGIES registry.
"""
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Wrap ``merge`` classmethod to catch any exception and re-raise as
# MergeConflictError.
if 'merge' in members and isinstance(members['merge'], classmethod):
orig_merge = members['merge'].__func__
@wraps(orig_merge)
def merge(cls, left, right):
try:
return orig_merge(cls, left, right)
except Exception as err:
raise MergeConflictError(err)
cls.merge = classmethod(merge)
# Register merging class (except for base MergeStrategy class)
if 'types' in members:
types = members['types']
if isinstance(types, tuple):
types = [types]
for left, right in reversed(types):
MERGE_STRATEGIES.insert(0, (left, right, cls))
return cls
class MergeStrategy(metaclass=MergeStrategyMeta):
"""
Base class for defining a strategy for merging metadata from two
sources, left and right, into a single output.
The primary functionality for the class is the ``merge(cls, left, right)``
class method. This takes ``left`` and ``right`` side arguments and
returns a single merged output.
The first class attribute is ``types``. This is defined as a list of
(left_types, right_types) tuples that indicate for which input types the
merge strategy applies. In determining whether to apply this merge
strategy to a pair of (left, right) objects, a test is done:
``isinstance(left, left_types) and isinstance(right, right_types)``. For
example::
types = [(np.ndarray, np.ndarray), # Two ndarrays
(np.ndarray, (list, tuple)), # ndarray and (list or tuple)
((list, tuple), np.ndarray)] # (list or tuple) and ndarray
As a convenience, ``types`` can be defined as a single two-tuple instead of
a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``.
The other class attribute is ``enabled``, which defaults to ``False`` in
the base class. By defining a subclass of ``MergeStrategy`` the new merge
strategy is automatically registered to be available for use in
merging. However, by default the new merge strategy is *not enabled*. This
prevents inadvertently changing the behavior of unrelated code that is
performing metadata merge operations.
In most cases (particularly in library code that others might use) it is
recommended to leave custom strategies disabled and use the
`~astropy.utils.metadata.enable_merge_strategies` context manager to locally
enable the desired strategies. However, if one is confident that the
new strategy will not produce unexpected behavior, then one can globally
enable it by setting the ``enabled`` class attribute to ``True``.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), (int, float)) # (left_types, right_types)
...
... @classmethod
... def merge(cls, left, right):
... return [left, right]
"""
# Set ``enabled = True`` to globally enable applying this merge strategy.
# This is not generally recommended.
enabled = False
# types = [(left_types, right_types), ...]
class MergePlus(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using the plus operator. This
merge strategy is globally enabled by default.
"""
types = [(list, list), (tuple, tuple)]
enabled = True
@classmethod
def merge(cls, left, right):
return left + right
class MergeNpConcatenate(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using np.concatenate. This
merge strategy is globally enabled by default.
This will upcast a list or tuple to np.ndarray and the output is
always ndarray.
"""
types = [(np.ndarray, np.ndarray),
(np.ndarray, (list, tuple)),
((list, tuple), np.ndarray)]
enabled = True
@classmethod
def merge(cls, left, right):
left, right = np.asanyarray(left), np.asanyarray(right)
common_dtype([left, right]) # Ensure left and right have compatible dtype
return np.concatenate([left, right])
def _both_isinstance(left, right, cls):
return isinstance(left, cls) and isinstance(right, cls)
def _not_equal(left, right):
try:
return bool(left != right)
except Exception:
return True
class _EnableMergeStrategies:
def __init__(self, *merge_strategies):
self.merge_strategies = merge_strategies
self.orig_enabled = {}
for left_type, right_type, merge_strategy in MERGE_STRATEGIES:
if issubclass(merge_strategy, merge_strategies):
self.orig_enabled[merge_strategy] = merge_strategy.enabled
merge_strategy.enabled = True
def __enter__(self):
pass
def __exit__(self, type, value, tb):
for merge_strategy, enabled in self.orig_enabled.items():
merge_strategy.enabled = enabled
def enable_merge_strategies(*merge_strategies):
"""
Context manager to temporarily enable one or more custom metadata merge
strategies.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), # left side types
... (int, float)) # right side types
... @classmethod
... def merge(cls, left, right):
... return [left, right]
By defining this class the merge strategy is automatically registered to be
available for use in merging. However, by default new merge strategies are
*not enabled*. This prevents inadvertently changing the behavior of
unrelated code that is performing metadata merge operations.
In order to use the new merge strategy, use this context manager as in the
following example::
>>> from astropy.table import Table, vstack
>>> from astropy.utils.metadata import enable_merge_strategies
>>> t1 = Table([[1]], names=['a'])
>>> t2 = Table([[2]], names=['a'])
>>> t1.meta = {'m': 1}
>>> t2.meta = {'m': 2}
>>> with enable_merge_strategies(MergeNumbersAsList):
... t12 = vstack([t1, t2])
>>> t12.meta['m']
[1, 2]
One can supply further merge strategies as additional arguments to the
context manager.
As a convenience, the enabling operation is actually done by checking
whether the registered strategies are subclasses of the context manager
arguments. This means one can define a related set of merge strategies and
then enable them all at once by enabling the base class. As a trivial
example, *all* registered merge strategies can be enabled with::
>>> with enable_merge_strategies(MergeStrategy):
... t12 = vstack([t1, t2])
Parameters
----------
merge_strategies : one or more `~astropy.utils.metadata.MergeStrategy` args
Merge strategies that will be enabled.
"""
return _EnableMergeStrategies(*merge_strategies)
def _warn_str_func(key, left, right):
out = ('Cannot merge meta key {0!r} types {1!r}'
' and {2!r}, choosing {0}={3!r}'
.format(key, type(left), type(right), right))
return out
def _error_str_func(key, left, right):
out = ('Cannot merge meta key {0!r} '
'types {1!r} and {2!r}'
.format(key, type(left), type(right)))
return out
def merge(left, right, merge_func=None, metadata_conflicts='warn',
warn_str_func=_warn_str_func,
error_str_func=_error_str_func):
"""
Merge the ``left`` and ``right`` metadata objects.
This is a simplistic and limited implementation at this point.
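A minimal sketch of the default behavior (non-conflicting keys are
combined; skipped in doctests since dict ordering may vary on very
old Python versions):
>>> merge({'a': 1}, {'b': 2})  # doctest: +SKIP
{'a': 1, 'b': 2}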
"""
if not _both_isinstance(left, right, dict):
raise MergeConflictError('Can only merge two dict-based objects')
out = deepcopy(left)
for key, val in right.items():
# If no conflict then insert val into out dict and continue
if key not in out:
out[key] = deepcopy(val)
continue
# There is a conflict that must be resolved
if _both_isinstance(left[key], right[key], dict):
out[key] = merge(left[key], right[key], merge_func,
metadata_conflicts=metadata_conflicts)
else:
try:
if merge_func is None:
for left_type, right_type, merge_cls in MERGE_STRATEGIES:
if not merge_cls.enabled:
continue
if (isinstance(left[key], left_type) and
isinstance(right[key], right_type)):
out[key] = merge_cls.merge(left[key], right[key])
break
else:
raise MergeConflictError
else:
out[key] = merge_func(left[key], right[key])
except MergeConflictError:
# Pick the metadata item that is not None. If both are not None
# and equal, there is no conflict; if they are different, there
# is a conflict and we pick the one on the right (or warn or
# raise an error, depending on ``metadata_conflicts``).
if left[key] is None:
# This may not seem necessary since out[key] gets set to
# right[key], but not all objects support != which is
# needed for one of the if clauses.
out[key] = right[key]
elif right[key] is None:
out[key] = left[key]
elif _not_equal(left[key], right[key]):
if metadata_conflicts == 'warn':
warnings.warn(warn_str_func(key, left[key], right[key]),
MergeConflictWarning)
elif metadata_conflicts == 'error':
raise MergeConflictError(error_str_func(key, left[key], right[key]))
elif metadata_conflicts != 'silent':
raise ValueError('metadata_conflicts argument must be one '
'of "silent", "warn", or "error"')
out[key] = right[key]
else:
out[key] = right[key]
return out
class MetaData:
"""
A descriptor for classes that have a ``meta`` property.
This can be set to any valid `~collections.abc.Mapping`.
Parameters
----------
doc : `str`, optional
Documentation for the attribute of the class.
Default is ``""``.
.. versionadded:: 1.2
copy : `bool`, optional
If ``True`` the value is deepcopied before setting, otherwise it
is saved as reference.
Default is ``True``.
.. versionadded:: 1.2
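Examples
--------
A minimal usage sketch with a hypothetical ``HasMeta`` class:
>>> class HasMeta:
...     meta = MetaData(doc='Metadata mapping.')
>>> obj = HasMeta()
>>> obj.meta['key'] = 'value'
>>> obj.meta['key']
'value'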
"""
def __init__(self, doc="", copy=True):
self.__doc__ = doc
self.copy = copy
def __get__(self, instance, owner):
if instance is None:
return self
if not hasattr(instance, '_meta'):
instance._meta = OrderedDict()
return instance._meta
def __set__(self, instance, value):
if value is None:
instance._meta = OrderedDict()
else:
if isinstance(value, Mapping):
if self.copy:
instance._meta = deepcopy(value)
else:
instance._meta = value
else:
raise TypeError("meta attribute must be dict-like")
|
e45e5647600b45571f62374d82744fe95243dfdd8892a66005cb60ceca49b3e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains errors/exceptions and warnings of general use for
astropy. Exceptions that are specific to a given subpackage should *not* be
here, but rather in the particular subpackage. An exception is the _erfa
module, as we would rather have users import those exceptions from here.
"""
class AstropyWarning(Warning):
"""
The base warning class from which all Astropy warnings should inherit.
Any warning inheriting from this class is handled by the Astropy logger.
"""
class AstropyUserWarning(UserWarning, AstropyWarning):
"""
The primary warning class for Astropy.
Use this if you do not need a specific sub-class.
"""
class AstropyDeprecationWarning(AstropyWarning):
"""
A warning class to indicate a deprecated feature.
"""
class AstropyPendingDeprecationWarning(PendingDeprecationWarning, AstropyWarning):
"""
A warning class to indicate a soon-to-be deprecated feature.
"""
class AstropyBackwardsIncompatibleChangeWarning(AstropyWarning):
"""
A warning class indicating a change in astropy that is incompatible
with previous versions.
The suggested procedure is to issue this warning for the version in
which the change occurs, and remove it for all following versions.
"""
class ErfaError(ValueError):
"""
A class for errors triggered by ERFA functions (status codes < 0)
Note: this class should *not* be referenced by fully-qualified name, because
it may move to ERFA in a future version. In a future such move it will
still be imported here as an alias, but the true namespace of the class may
change.
"""
class ErfaWarning(AstropyUserWarning):
"""
A class for warnings triggered by ERFA functions (status codes > 0)
Note: this class should *not* be referenced by fully-qualified name, because
it may move to ERFA in a future version. In a future such move it will
still be imported here as an alias, but the true namespace of the class may
change.
"""
class _NoValue:
"""Special keyword value.
This class may be used as the default value assigned to a
deprecated keyword in order to check if it has been given a user
defined value.
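A minimal sketch of the intended pattern (with a hypothetical
``func``)::
    def func(option=NoValue):
        if option is not NoValue:
            # the user explicitly passed the deprecated keyword
            ...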
"""
def __repr__(self):
return 'astropy.utils.exceptions.NoValue'
NoValue = _NoValue()
|
4ea86e31f8f0d4ab248e88c0ee37f5c8969041df0711c16cea8d461550e3151a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from distutils.core import Extension
from os.path import dirname, join, relpath
ASTROPY_UTILS_ROOT = dirname(__file__)
def get_extensions():
return [
Extension('astropy.utils._compiler',
[relpath(join(ASTROPY_UTILS_ROOT, 'src', 'compiler.c'))])
]
|
e3bf2b11ed43e17da1b0b3251819db3f69e2fc476db1a4460af0fa5410cd4a6a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions and methods that relate to the DataInfo class
which provides a container for informational attributes as well as summary info
methods.
A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy. Here it allows those classes to be used in Tables and uniformly carry
table column attributes such as name, format, dtype, meta, and description.
"""
# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.
import os
import re
import sys
import weakref
import warnings
from io import StringIO
from copy import deepcopy
from functools import partial
from collections import OrderedDict
from contextlib import contextmanager
import numpy as np
from . import metadata
__all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo',
'DataInfo', 'MixinInfo', 'ParentDtypeInfo']
# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'
'Mean of empty slice|Degrees of freedom <= 0|'
'invalid value encountered in sqrt'),)
STRING_TYPE_NAMES = {(False, 'S'): 'str', # not PY3
(False, 'U'): 'unicode',
(True, 'S'): 'bytes', # PY3
(True, 'U'): 'str'}
@contextmanager
def serialize_context_as(context):
"""Set context for serialization.
This will allow downstream code to understand the context in which a column
is being serialized. Objects like Time or SkyCoord will have different
default serialization representations depending on context.
Parameters
----------
context : str
Context name, e.g. 'fits', 'hdf5', 'ecsv', 'yaml'
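Examples
--------
A minimal usage sketch:
>>> with serialize_context_as('fits'):
...     pass  # code inside the block sees the 'fits' context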
"""
old_context = BaseColumnInfo._serialize_context
BaseColumnInfo._serialize_context = context
yield
BaseColumnInfo._serialize_context = old_context
def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
This can be used by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
For bytes, string and unicode types, the output is shown below, where <N>
is the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy S<N> U<N>
Python bytes<N> str<N>
Parameters
----------
dtype : str, np.dtype, type
Input dtype as an object that can be converted via np.dtype()
Returns
-------
dtype_info_name : str
String name of ``dtype``
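Examples
--------
>>> import numpy as np
>>> dtype_info_name(np.dtype('U4'))
'str4'
>>> dtype_info_name(np.float64)
'float64'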
"""
dtype = np.dtype(dtype)
if dtype.kind in ('S', 'U'):
length = re.search(r'(\d+)', dtype.str).group(1)
type_name = STRING_TYPE_NAMES[(True, dtype.kind)]
out = type_name + length
else:
out = dtype.name
return out
def data_info_factory(names, funcs):
"""
Factory to create a function that can be used as an ``option``
for outputting data object summary information.
Examples
--------
>>> from astropy.utils.data_info import data_info_factory
>>> from astropy.table import Column
>>> c = Column([4., 3., 2., 1.])
>>> mystats = data_info_factory(names=['min', 'median', 'max'],
... funcs=[np.min, np.median, np.max])
>>> c.info(option=mystats)
min = 1.0
median = 2.5
max = 4.0
n_bad = 0
length = 4
Parameters
----------
names : list
List of information attribute names
funcs : list
List of functions that compute the corresponding information attribute
Returns
-------
func : function
Function that can be used as a data info option
"""
def func(dat):
outs = []
for name, func in zip(names, funcs):
try:
if isinstance(func, str):
out = getattr(dat, func)()
else:
out = func(dat)
except Exception:
outs.append('--')
else:
outs.append(str(out))
return OrderedDict(zip(names, outs))
return func
def _get_obj_attrs_map(obj, attrs):
"""
Get the values for object ``attrs`` and return as a dict.  This
ignores any attributes that are None.  In the context of serializing
the supported core astropy classes, dropping the None values results
in more succinct and less python-specific YAML.
"""
out = {}
for attr in attrs:
val = getattr(obj, attr, None)
if val is not None:
out[attr] = val
return out
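# Behavior sketch (illustrative): only non-None attributes survive.  Assuming
# an object ``obj`` with ``obj.name == 'x'`` and ``obj.unit is None``:
#
#     _get_obj_attrs_map(obj, ['name', 'unit'])   # -> {'name': 'x'}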
def _get_data_attribute(dat, attr=None):
"""
Get a data object attribute for the ``attributes`` info summary method
"""
if attr == 'class':
val = type(dat).__name__
elif attr == 'dtype':
val = dtype_info_name(dat.info.dtype)
elif attr == 'shape':
datshape = dat.shape[1:]
val = datshape if datshape else ''
else:
val = getattr(dat.info, attr)
if val is None:
val = ''
return str(val)
class DataInfo:
"""
Descriptor that data classes use to add an ``info`` attribute for storing
data attributes in a uniform and portable way. Note that it *must* be
called ``info`` so that the DataInfo() object can be stored in the
``instance`` using the ``info`` key. Because owner_cls.x is a descriptor,
Python doesn't use __dict__['x'] normally, and the descriptor can safely
store stuff there. Thanks to http://nbviewer.ipython.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb
for this trick that works for non-hashable classes.
Parameters
----------
bound : bool
If True this is a descriptor attribute in a class definition, else it
is a DataInfo() object that is bound to a data object instance. Default is False.
"""
_stats = ['mean', 'std', 'min', 'max']
attrs_from_parent = set()
attr_names = set(['name', 'unit', 'dtype', 'format', 'description', 'meta'])
_attrs_no_copy = set()
_info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class')
_parent_ref = None
# This specifies the list of object attributes which must be stored in
# order to re-create the object after serialization. This is independent
# of normal `info` attributes like name or description. Subclasses will
# generally either define this statically (QuantityInfo) or dynamically
# (SkyCoordInfo). These attributes may be scalars or arrays. If arrays
# that match the object length they will be serialized as an independent
# column.
_represent_as_dict_attrs = ()
# This specifies attributes which are to be provided to the class
# initializer as ordered args instead of keyword args. This is needed
# for Quantity subclasses where the keyword for data varies (e.g.
# between Quantity and Angle).
_construct_from_dict_args = ()
# This specifies the name of an attribute which is the "primary" data.
# Then when representing as columns
# (table.serialize._represent_mixin_as_column) the output for this
# attribute will be written with the just name of the mixin instead of the
# usual "<name>.<attr>".
_represent_as_dict_primary_data = None
def __init__(self, bound=False):
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
self._attrs = dict((attr, None) for attr in self.attr_names)
@property
def _parent(self):
if self._parent_ref is None:
return None
else:
parent = self._parent_ref()
if parent is not None:
return parent
else:
raise AttributeError("""\
failed access "info" attribute on a temporary object.
It looks like you have done something like ``col[3:5].info``, i.e.
you accessed ``info`` from a temporary slice object ``col[3:5]`` that
only exists momentarily. This has failed because the reference to
that temporary object is now lost. Instead force a permanent
reference with ``c = col[3:5]`` followed by ``c.info``.""")
@_parent.setter
def _parent(self, value):
if value is None:
self._parent_ref = None
else:
self._parent_ref = weakref.ref(value)
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
info = self
info._parent_cls = owner_cls
else:
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
info._parent = instance
return info
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError('cannot set unbound descriptor')
if isinstance(value, DataInfo):
info = instance.__dict__['info'] = self.__class__(bound=True)
for attr in info.attr_names - info.attrs_from_parent - info._attrs_no_copy:
info._attrs[attr] = deepcopy(getattr(value, attr))
else:
raise TypeError('info must be set with a DataInfo instance')
def __getstate__(self):
return self._attrs
def __setstate__(self, state):
self._attrs = state
def __getattr__(self, attr):
if attr.startswith('_'):
return super().__getattribute__(attr)
if attr in self.attrs_from_parent:
return getattr(self._parent, attr)
try:
value = self._attrs[attr]
except KeyError:
super().__getattribute__(attr) # Generate AttributeError
# Weak ref for parent table
if attr == 'parent_table' and callable(value):
value = value()
# Mixins have a default dtype of Object if nothing else was set
if attr == 'dtype' and value is None:
value = np.dtype('O')
return value
def __setattr__(self, attr, value):
propobj = getattr(self.__class__, attr, None)
# If attribute is taken from parent properties and there is not a
# class property (getter/setter) for this attribute then set
# attribute directly in parent.
if attr in self.attrs_from_parent and not isinstance(propobj, property):
setattr(self._parent, attr, value)
return
# Check if there is a property setter and use it if possible.
if isinstance(propobj, property):
if propobj.fset is None:
raise AttributeError("can't set attribute")
propobj.fset(self, value)
return
# Private attr names get directly set
if attr.startswith('_'):
super().__setattr__(attr, value)
return
# Finally this must be an actual data attribute that this class is handling.
if attr not in self.attr_names:
raise AttributeError("attribute must be one of {0}".format(self.attr_names))
if attr == 'parent_table':
value = None if value is None else weakref.ref(value)
self._attrs[attr] = value
def _represent_as_dict(self):
"""Get the values for the parent ``attrs`` and return as a dict."""
return _get_obj_attrs_map(self._parent, self._represent_as_dict_attrs)
def _construct_from_dict(self, map):
args = [map.pop(attr) for attr in self._construct_from_dict_args]
return self._parent_cls(*args, **map)
info_summary_attributes = staticmethod(
data_info_factory(names=_info_summary_attrs,
funcs=[partial(_get_data_attribute, attr=attr)
for attr in _info_summary_attrs]))
# No nan* methods in numpy < 1.8
info_summary_stats = staticmethod(
data_info_factory(names=_stats,
funcs=[getattr(np, 'nan' + stat)
for stat in _stats]))
def __call__(self, option='attributes', out=''):
"""
Write summary information about data object to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: data object attributes like ``dtype`` and ``format``
- ``stats``: basic statistics: mean, std, min, and max
If a function is specified then that function will be called with the
data object as its single argument. The function must return an
OrderedDict containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table import Column
>>> c = Column([1, 2], unit='m', dtype='int32')
>>> c.info()
dtype = int32
unit = m
class = Column
n_bad = 0
length = 2
>>> c.info(['attributes', 'stats'])
dtype = int32
unit = m
class = Column
mean = 1.5
std = 0.5
min = 1
max = 2
n_bad = 0
length = 2
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, defaults to sys.stdout. If None then the
OrderedDict with information attributes is returned
Returns
-------
info : OrderedDict if out==None else None
"""
if out == '':
out = sys.stdout
dat = self._parent
info = OrderedDict()
name = dat.info.name
if name is not None:
info['name'] = name
options = option if isinstance(option, (list, tuple)) else [option]
for option in options:
if isinstance(option, str):
if hasattr(self, 'info_summary_' + option):
option = getattr(self, 'info_summary_' + option)
else:
raise ValueError('option={0} is not an allowed information type'
.format(option))
with warnings.catch_warnings():
for ignore_kwargs in IGNORE_WARNINGS:
warnings.filterwarnings('ignore', **ignore_kwargs)
info.update(option(dat))
if hasattr(dat, 'mask'):
n_bad = np.count_nonzero(dat.mask)
else:
try:
n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
except Exception:
n_bad = 0
info['n_bad'] = n_bad
try:
info['length'] = len(dat)
except TypeError:
pass
if out is None:
return info
for key, val in info.items():
if val != '':
out.write('{0} = {1}'.format(key, val) + os.linesep)
def __repr__(self):
if self._parent is None:
return super().__repr__()
out = StringIO()
self.__call__(out=out)
return out.getvalue()
class BaseColumnInfo(DataInfo):
"""
Base info class for anything that can be a column in an astropy
Table. There are at least two classes that inherit from this:
ColumnInfo: for native astropy Column / MaskedColumn objects
MixinInfo: for mixin column objects
Note that this class is defined here so that mixins can use it
without importing the table package.
"""
attr_names = DataInfo.attr_names.union(['parent_table', 'indices'])
_attrs_no_copy = set(['parent_table'])
# Context for serialization. This can be set temporarily via
# ``serialize_context_as(context)`` context manager to allow downstream
# code to understand the context in which a column is being serialized.
# Typical values are 'fits', 'hdf5', 'ecsv', 'yaml'. Objects like Time or
# SkyCoord will have different default serialization representations
# depending on context.
_serialize_context = None
def __init__(self, bound=False):
super().__init__(bound=bound)
# If bound to a data object instance then add a _format_funcs dict
# for caching functions for print formatting.
if bound:
self._format_funcs = {}
def iter_str_vals(self):
"""
This is a mixin-safe version of Column.iter_str_vals.
"""
col = self._parent
if self.parent_table is None:
from astropy.table.column import FORMATTER as formatter
else:
formatter = self.parent_table.formatter
_pformat_col_iter = formatter._pformat_col_iter
for str_val in _pformat_col_iter(col, -1, False, False, {}):
yield str_val
def adjust_indices(self, index, value, col_len):
'''
Adjust info indices after column modification.
Parameters
----------
index : slice, int, list, or ndarray
Element(s) of column to modify. This parameter can
be a single row number, a list of row numbers, an
ndarray of row numbers, a boolean ndarray (a mask),
or a column slice.
value : int, list, or ndarray
New value(s) to insert
col_len : int
Length of the column
'''
if not self.indices:
return
if isinstance(index, slice):
# run through each key in slice
t = index.indices(col_len)
keys = list(range(*t))
elif isinstance(index, np.ndarray) and index.dtype.kind == 'b':
# boolean mask
keys = np.where(index)[0]
else: # single int
keys = [index]
value = np.atleast_1d(value) # turn array(x) into array([x])
if value.size == 1:
# repeat single value
value = list(value) * len(keys)
for key, val in zip(keys, value):
for col_index in self.indices:
col_index.replace(key, self.name, val)
def slice_indices(self, col_slice, item, col_len):
'''
Given a sliced object, modify its indices
to correctly represent the slice.
Parameters
----------
col_slice : Column or mixin
Sliced object
item : slice, list, or ndarray
Slice used to create col_slice
col_len : int
Length of original object
'''
from astropy.table.sorted_array import SortedArray
if not getattr(self, '_copy_indices', True):
# Necessary because MaskedArray will perform a shallow copy
col_slice.info.indices = []
return col_slice
elif isinstance(item, slice):
col_slice.info.indices = [x[item] for x in self.indices]
elif self.indices:
if isinstance(item, np.ndarray) and item.dtype.kind == 'b':
# boolean mask
item = np.where(item)[0]
threshold = 0.6
# Empirical testing suggests that recreating a BST/RBT index is
# more effective than relabelling when less than ~60% of
# the total number of rows are involved, and is in general
# more effective for SortedArray.
small = len(item) <= threshold * col_len
col_slice.info.indices = []
for index in self.indices:
if small or isinstance(index, SortedArray):
new_index = index.get_slice(col_slice, item)
else:
new_index = deepcopy(index)
new_index.replace_rows(item)
col_slice.info.indices.append(new_index)
return col_slice
@staticmethod
def merge_cols_attributes(cols, metadata_conflicts, name, attrs):
"""
Utility method to merge and validate the attributes ``attrs`` for the
input table columns ``cols``.
Note that ``dtype`` and ``shape`` attributes are handled specially.
These should not be passed in ``attrs`` but will always be in the
returned dict of merged attributes.
Parameters
----------
cols : list
List of input Table column objects
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
attrs : list
List of attribute names to be merged
Returns
-------
attrs : dict of merged attributes
"""
from astropy.table.np_utils import TableMergeError
def warn_str_func(key, left, right):
out = ("In merged column '{}' the '{}' attribute does not match "
"({} != {}). Using {} for merged output"
.format(name, key, left, right, right))
return out
def getattrs(col):
return {attr: getattr(col.info, attr) for attr in attrs
if getattr(col.info, attr, None) is not None}
out = getattrs(cols[0])
for col in cols[1:]:
out = metadata.merge(out, getattrs(col), metadata_conflicts=metadata_conflicts,
warn_str_func=warn_str_func)
# Output dtype is the superset of all dtypes in ``cols``
out['dtype'] = metadata.common_dtype(cols)
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in cols)
if len(uniq_shapes) != 1:
raise TableMergeError('columns have different shapes')
out['shape'] = uniq_shapes.pop()
return out
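# Usage sketch (illustrative; ``c1`` and ``c2`` are assumed Columns with the
# same trailing shape):
#
#     attrs = BaseColumnInfo.merge_cols_attributes(
#         [c1, c2], metadata_conflicts='warn', name='x',
#         attrs=['meta', 'unit', 'format', 'description'])
#     # Besides the merged attributes above, the result always carries the
#     # common 'dtype' and the shared 'shape' of the inputs.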
class MixinInfo(BaseColumnInfo):
def __setattr__(self, attr, value):
# For mixin columns that live within a table, rename the column in the
# table when setting the name attribute. This mirrors the same
# functionality in the BaseColumn class.
if attr == 'name' and self.parent_table is not None:
from astropy.table.np_utils import fix_column_name
new_name = fix_column_name(value) # Ensure col name is numpy compatible
self.parent_table.columns._rename_column(self.name, new_name)
super().__setattr__(attr, value)
class ParentDtypeInfo(MixinInfo):
"""Mixin that gets info.dtype from parent"""
attrs_from_parent = set(['dtype']) # dtype and unit taken from parent
|
221dcbc89e52de33fc1b9f44a3fbe461874671026df97354d762b10f0165ee8c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains helper functions for accessing, downloading, and
caching data files.
"""
import atexit
import contextlib
import fnmatch
import hashlib
import os
import io
import pathlib
import shutil
import socket
import sys
import time
import urllib.request
import urllib.error
import urllib.parse
import shelve
from tempfile import NamedTemporaryFile, gettempdir
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
__all__ = [
'Conf', 'conf', 'get_readable_fileobj', 'get_file_contents',
'get_pkg_data_fileobj', 'get_pkg_data_filename',
'get_pkg_data_contents', 'get_pkg_data_fileobjs',
'get_pkg_data_filenames', 'compute_hash', 'clear_download_cache',
'CacheMissingWarning', 'get_free_space_in_dir',
'check_free_space_in_dir', 'download_file',
'download_files_in_parallel', 'is_url_in_cache', 'get_cached_urls']
_dataurls_to_alias = {}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
'http://data.astropy.org/',
'Primary URL for astropy remote data site.')
dataurl_mirror = _config.ConfigItem(
'http://www.astropy.org/astropy-data/',
'Mirror URL for astropy remote data site.')
remote_timeout = _config.ConfigItem(
10.,
'Time to wait for remote data queries (in seconds).',
aliases=['astropy.coordinates.name_resolve.name_resolve_timeout'])
compute_hash_block_size = _config.ConfigItem(
2 ** 16, # 64K
'Block size for computing MD5 file hashes.')
download_block_size = _config.ConfigItem(
2 ** 16, # 64K
'Number of bytes of remote data to download per step.')
download_cache_lock_attempts = _config.ConfigItem(
5,
'Number of times to try to get the lock ' +
'while accessing the data cache before giving up.')
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
'If True, temporary download files created when the cache is '
'inaccessible will be deleted at the end of the python session.')
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def _is_url(string):
"""
Test whether a string is a valid URL
Parameters
----------
string : str
The string to test
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ['http', 'https', 'ftp', 'sftp', 'ssh', 'file']
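# Behavior sketch (illustrative):
#
#     _is_url('http://data.astropy.org/file.fits')   # -> True
#     _is_url(r'e:\data\file.fits')                  # -> False: the parsed
#                                                    #    scheme 'e' is not an
#                                                    #    accepted protocol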
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(os.path.abspath(parent_path)) \
or os.path.realpath(path).startswith(os.path.realpath(parent_path))
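# Behavior sketch (illustrative paths):
#
#     _is_inside('/pkg/sub/data.fits', '/pkg')      # -> True
#     _is_inside('/elsewhere/data.fits', '/pkg')    # -> False, unless symlinks
#                                                   #    make the realpaths nest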
@contextlib.contextmanager
def get_readable_fileobj(name_or_obj, encoding=None, cache=False,
show_progress=True, remote_timeout=None):
"""
Given a filename, pathlib.Path object or a readable file-like object, return a context
manager that yields a readable file-like object.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
Parameters
----------
name_or_obj : str or file-like object
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool, optional
Whether to cache the contents of remote URLs.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`, which is 10 s by default)
Returns
-------
file : readable file-like object
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
PATH_TYPES = (str, pathlib.Path)
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Get a file object to the content
if isinstance(name_or_obj, PATH_TYPES):
# name_or_obj could be a pathlib.Path object; normalize it to str
name_or_obj = str(name_or_obj)
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj, cache=cache, show_progress=show_progress,
timeout=remote_timeout)
fileobj = io.FileIO(name_or_obj, 'r')
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
# BufferedReader to avoid loading the whole file first, but that
# is not compatible with streams or urllib.request.urlopen objects.
if not hasattr(fileobj, 'seek'):
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b'\x1f\x8b\x08': # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode='rb')
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b'BZh': # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ValueError(
".bz2 format files are not supported since the Python "
"interpreter does not include the bz2 module")
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode='rb')
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b'\xfd7z': # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode='rb')
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ValueError(
".xz format files are not supported since the Python "
"interpreter does not include the lzma module.")
except (OSError, EOFError) as e: # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != 'binary'
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, 'r')
close_fds.append(fileobj)
fileobj = io.BufferedReader(fileobj)
fileobj = io.TextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
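# Usage sketch (illustrative; 'catalog.txt.gz' is a hypothetical file):
# compressed input is detected from its signature and transparently
# decompressed, so the caller just reads text:
#
#     with get_readable_fileobj('catalog.txt.gz') as f:
#         first_line = f.readline()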
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
content
The content of the file (as requested by ``encoding``).
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
"""
datafn = _find_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError("Tried to access a data file that's actually "
"a package data directory")
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
all_urls = (conf.dataurl, conf.dataurl_mirror)
for url in all_urls:
try:
with get_readable_fileobj(url + data_name, encoding=encoding,
cache=cache) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
break
except urllib.error.URLError:
pass
else:
urls = '\n'.join(' - {0}'.format(url) for url in all_urls)
raise urllib.error.URLError("Failed to download {0} from the following "
"repositories:\n\n{1}".format(data_name, urls))
def get_pkg_data_filename(data_name, package=None, show_progress=True,
remote_timeout=None):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`, which
is 10 s by default)
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith('hash/'):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
all_urls = (conf.dataurl, conf.dataurl_mirror)
for url in all_urls:
try:
return download_file(url + data_name, cache=True,
show_progress=show_progress,
timeout=remote_timeout)
except urllib.error.URLError:
pass
urls = '\n'.join(' - {0}'.format(url) for url in all_urls)
raise urllib.error.URLError("Failed to download {0} from the following "
"repositories:\n\n{1}\n\n".format(data_name, urls))
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = _find_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError("Tried to access a data file that's actually "
"a package data directory")
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
all_urls = (conf.dataurl, conf.dataurl_mirror)
for url in all_urls:
try:
return download_file(url + data_name, cache=True,
show_progress=show_progress,
timeout=remote_timeout)
except urllib.error.URLError:
pass
urls = '\n'.join(' - {0}'.format(url) for url in all_urls)
raise urllib.error.URLError("Failed to download {0} from the following "
"repositories:\n\n{1}".format(data_name, urls))
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
"""
with get_pkg_data_fileobj(data_name, package=package, encoding=encoding,
cache=cache) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern='*'):
"""
Returns the path of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = _find_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually "
"a package data file")
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern='*', encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file objects
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package,
pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
""" Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
md5hash : str
The hex digest of the MD5 hash for the contents of the ``localfn``
file.
"""
with open(localfn, 'rb') as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
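# Usage sketch (illustrative path and digest):
#
#     >>> compute_hash('/local/path/data.dat')    # doctest: +SKIP
#     '34c33b3eb0d56eb9462003af249eff28'
#
# Reading in conf.compute_hash_block_size chunks keeps memory use flat even
# for very large files.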
def _find_pkg_data_path(data_name, package=None):
"""
Look for data in the source-included data directories and return the
path.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return data_name
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, data_name)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError("attempted to get a local data file outside "
"of the {} tree.".format(rootpkgname))
return path
def _find_hash_fn(hash):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Could not access cache directory to search for data file: '
warn(CacheMissingWarning(msg + str(e)))
return None
hashfn = os.path.join(dldir, hash)
if os.path.isfile(hashfn):
return hashfn
else:
return None
def get_free_space_in_dir(path):
"""
Given a path to a directory, returns the amount of free space (in
bytes) on that filesystem.
Parameters
----------
path : str
The path to a directory
Returns
-------
bytes : int
The amount of free space on the partition that the directory
is on.
"""
if sys.platform.startswith('win'):
import ctypes
free_bytes = ctypes.c_ulonglong(0)
retval = ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes))
if retval == 0:
raise OSError('Checking free space on {!r} failed '
'unexpectedly.'.format(path))
return free_bytes.value
else:
stat = os.statvfs(path)
return stat.f_bavail * stat.f_frsize
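# Usage sketch (illustrative value):
#
#     >>> get_free_space_in_dir('/tmp')    # doctest: +SKIP
#     34985943040
#
# On POSIX this is f_bavail * f_frsize, i.e. the space available to an
# unprivileged caller, which can be less than the raw free space.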
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size. Raises an OSError if the file would be too large.
Parameters
----------
path : str
The path to a directory
size : int
A proposed filesize (in bytes)
Raises
------
OSError : There is not enough room on the filesystem
"""
from astropy.utils.console import human_file_size
space = get_free_space_in_dir(path)
if space < size:
raise OSError(
"Not enough free space in '{0}' "
"to download a {1} file".format(
path, human_file_size(size)))
def download_file(remote_url, cache=False, show_progress=True, timeout=None):
"""
Accepts a URL, downloads and optionally caches the result
returning the filename, with a name determined by the file's MD5
hash. If ``cache=True`` and the file is present in the cache, just
returns the filename.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool, optional
Whether to use the cache
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
The timeout, in seconds.  If not given, defaults to
`astropy.utils.data.Conf.remote_timeout`.
Returns
-------
local_path : str
Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
"""
from astropy.utils.console import ProgressBarOrSpinner
if timeout is None:
timeout = conf.remote_timeout
missing_cache = False
if cache:
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
cache = False
missing_cache = True # indicates that the cache is missing to raise a warning later
url_key = remote_url
# Check if URL is Astropy data server, which has alias, and cache it.
if (url_key.startswith(conf.dataurl) and
conf.dataurl not in _dataurls_to_alias):
try:
with urllib.request.urlopen(conf.dataurl, timeout=timeout) as remote:
_dataurls_to_alias[conf.dataurl] = [conf.dataurl, remote.geturl()]
except urllib.error.URLError: # Host unreachable
_dataurls_to_alias[conf.dataurl] = [conf.dataurl]
try:
if cache:
# We don't need to acquire the lock here, since we are only reading
with shelve.open(urlmapfn) as url2hash:
if url_key in url2hash:
return url2hash[url_key]
# If there is a cached copy from mirror, use it.
else:
for cur_url in _dataurls_to_alias.get(conf.dataurl, []):
if url_key.startswith(cur_url):
url_mirror = url_key.replace(cur_url,
conf.dataurl_mirror)
if url_mirror in url2hash:
return url2hash[url_mirror]
with urllib.request.urlopen(remote_url, timeout=timeout) as remote:
# keep a hash to rename the local file to the hashed name
hash = hashlib.md5()
info = remote.info()
if 'Content-Length' in info:
try:
size = int(info['Content-Length'])
except ValueError:
size = None
else:
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
check_free_space_in_dir(dldir, size)
if show_progress and sys.stdout.isatty():
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
dlmsg = "Downloading {0}".format(remote_url)
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(delete=False) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
hash.update(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
except BaseException:
if os.path.exists(f.name):
os.remove(f.name)
raise
if cache:
_acquire_download_cache_lock()
try:
with shelve.open(urlmapfn) as url2hash:
# We check now to see if another process has
# inadvertently written the file underneath us
# already
if url_key in url2hash:
return url2hash[url_key]
local_path = os.path.join(dldir, hash.hexdigest())
shutil.move(f.name, local_path)
url2hash[url_key] = local_path
finally:
_release_download_cache_lock()
else:
local_path = f.name
if missing_cache:
msg = ('File downloaded to temporary location due to problem '
'with cache directory and will not be cached.')
warn(CacheMissingWarning(msg, local_path))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(local_path)
except urllib.error.URLError as e:
if hasattr(e, 'reason') and hasattr(e.reason, 'errno') and e.reason.errno == 8:
e.reason.strerror = e.reason.strerror + '. requested URL: ' + remote_url
e.reason.args = (e.reason.errno, e.reason.strerror)
raise e
except socket.timeout as e:
# this isn't supposed to happen, but occasionally a socket.timeout gets
# through. It's supposed to be caught in `urllib` and raised in this
# way, but for some reason in mysterious circumstances it doesn't. So
# we'll just re-raise it here instead
raise urllib.error.URLError(e)
return local_path
def is_url_in_cache(url_key):
"""
Check if a download from ``url_key`` is in the cache.
Parameters
----------
url_key : string
The URL retrieved
Returns
-------
in_cache : bool
`True` if a download from ``url_key`` is in the cache
"""
# The code below is modified from astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return False
with shelve.open(urlmapfn) as url2hash:
if url_key in url2hash:
return True
return False
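# Usage sketch (illustrative URL):
#
#     >>> is_url_in_cache('http://data.astropy.org/allsky/allsky_rosat.fits')  # doctest: +SKIP
#     False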
def _do_download_files_in_parallel(args):
return download_file(*args)
def download_files_in_parallel(urls, cache=True, show_progress=True,
timeout=None):
"""
Downloads multiple files in parallel from the given URLs. Blocks until
all files have downloaded. The result is a list of local file paths
corresponding to the given urls.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool, optional
Whether to use the cache (default is `True`).
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False`` will
print a Warning and set it to ``True`` again, because the function
will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if not cache:
# See issue #6662, on windows won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# in the list of to-be-removed files.
warn("Disabling the cache does not work because of multiprocessing, it "
"will be set to ``True``. You may need to manually remove the "
"cached files afterwards.", AstropyWarning)
cache = True
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[(x, cache, False, timeout) for x in combined_urls],
file=progress,
multiprocess=True)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
os.remove(fn)
def clear_download_cache(hashorurl=None):
""" Clears the data file cache by deleting the local file(s).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, either specifies a
hash for the cached file that is supposed to be deleted, or a URL that
should be removed from the cache if present.
"""
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Not clearing data cache - cache inaccessible due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
_acquire_download_cache_lock()
try:
if hashorurl is None:
# dldir includes both the download files and the urlmapfn. This structure
# is required since we cannot know a priori the actual file name corresponding
# to the shelve map named urlmapfn.
if os.path.exists(dldir):
shutil.rmtree(dldir)
else:
with shelve.open(urlmapfn) as url2hash:
filepath = os.path.join(dldir, hashorurl)
if not _is_inside(filepath, dldir):
raise RuntimeError("attempted to use clear_download_cache on"
" a path outside the data cache directory")
hash_key = hashorurl
if os.path.exists(filepath):
for k, v in url2hash.items():
if v == filepath:
del url2hash[k]
os.unlink(filepath)
elif hash_key in url2hash:
filepath = url2hash[hash_key]
del url2hash[hash_key]
if os.path.exists(filepath):
# Make sure the filepath still actually exists (perhaps user removed it)
os.unlink(filepath)
# Otherwise could not find file or url, but no worries.
# Clearing download cache just makes sure that the file or url
# is no longer in the cache regardless of starting condition.
finally:
# the lock will be gone if rmtree was used above, but release otherwise
if os.path.exists(os.path.join(dldir, 'lock')):
_release_download_cache_lock()
def _get_download_cache_locs():
""" Finds the path to the data cache directory and makes them if
they don't exist.
Returns
-------
datadir : str
The path to the data cache directory.
shelveloc : str
The path to the shelve object that stores the cache info.
"""
from astropy.config.paths import get_cache_dir
# datadir includes both the download files and the shelveloc. This structure
# is required since we cannot know a priori the actual file name corresponding
# to the shelve map named shelveloc. (The backend can vary and is allowed to
# do whatever it wants with the filename. Filename munging can and does happen
# in practice).
py_version = 'py' + str(sys.version_info.major)
datadir = os.path.join(get_cache_dir(), 'download', py_version)
shelveloc = os.path.join(datadir, 'urlmap')
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError as e:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
msg = 'Data cache directory {0} is not a directory'
raise OSError(msg.format(datadir))
if os.path.isdir(shelveloc):
msg = 'Data cache shelve object location {0} is a directory'
raise OSError(msg.format(shelveloc))
return datadir, shelveloc
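# Layout sketch (illustrative; the actual root comes from get_cache_dir()):
#
#     <cache_dir>/download/py3/          <- datadir, holding hash-named files
#     <cache_dir>/download/py3/urlmap    <- shelveloc (the shelve backend may
#                                           append its own suffixes)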
# the cache directory must be locked before any writes are performed. Same for
# the hash shelve, so this should be used for both.
def _acquire_download_cache_lock():
"""
Uses the lock directory method. This is good because `mkdir` is
atomic at the system call level, so it's thread-safe.
"""
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
for i in range(conf.download_cache_lock_attempts):
try:
os.mkdir(lockdir)
# write the pid of this process for informational purposes
with open(os.path.join(lockdir, 'pid'), 'w') as f:
f.write(str(os.getpid()))
except OSError:
time.sleep(1)
else:
return
msg = ("Unable to acquire lock for cache directory ({0} exists). "
"You may need to delete the lock if the python interpreter wasn't "
"shut down properly.")
raise RuntimeError(msg.format(lockdir))
def _release_download_cache_lock():
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
if os.path.isdir(lockdir):
# if the pid file is present, be sure to remove it
pidfn = os.path.join(lockdir, 'pid')
if os.path.exists(pidfn):
os.remove(pidfn)
os.rmdir(lockdir)
else:
msg = 'Error releasing lock. "{0}" either does not exist or is not ' +\
'a directory.'
raise RuntimeError(msg.format(lockdir))
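# Locking protocol sketch (illustrative): any writer to the cache directory or
# the URL-to-hash shelve should bracket its mutation the way download_file
# does:
#
#     _acquire_download_cache_lock()
#     try:
#         pass  # move files into dldir / update the shelve here
#     finally:
#         _release_download_cache_lock()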
def get_cached_urls():
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
Returns
-------
cached_urls : list
List of cached URLs.
"""
# The code below is modified from astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return []
with shelve.open(urlmapfn) as url2hash:
return list(url2hash.keys())
|
372ecc9c8c87667b7768399d02bc7ccaaca4e4e873ca4a79a8394583430ddc36 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
import abc
import contextlib
import difflib
import inspect
import json
import os
import signal
import sys
import traceback
import unicodedata
import locale
import threading
import re
from itertools import zip_longest
from contextlib import contextmanager
from collections import defaultdict, OrderedDict
__all__ = ['isiterable', 'silence', 'format_exception', 'NumpyRNGContext',
'find_api_page', 'is_path_hidden', 'walk_skip_hidden',
'JsonCustomEncoder', 'indent', 'InheritDocstrings',
'OrderedDescriptor', 'OrderedDescriptorContainer', 'set_locale',
'ShapedLikeNDArray', 'check_broadcast', 'IncompatibleShapeError',
'dtype_bytes_or_chars']
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
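# Behavior sketch (doctest-style, illustrative):
#
#     >>> isiterable([1, 2, 3])
#     True
#     >>> isiterable(5)
#     False
#     >>> isiterable('abc')   # strings are iterable too
#     True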
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = '\n'.join(' ' * (width * shift) + l if l else ''
for l in s.splitlines())
    if s and s[-1] == '\n':
indented += '\n'
return indented
class _DummyFile:
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
yield
sys.stdout = old_stdout
sys.stderr = old_stderr
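# Usage sketch for ``silence`` (the function below is a hypothetical
# example, not part of the module's API): output inside the block is
# discarded, and both streams are restored afterwards.
def _example_silence():
    with silence():
        print('this goes nowhere')
    print('this is printed normally again')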
def format_exception(msg, *args, **kwargs):
"""
Given an exception message string, uses new-style formatting arguments
``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
information about the exception that occurred. For example:
try:
1/0
except:
raise ZeroDivisionError(
                format_exception('A divide by zero occurred in {filename} at '
                                 'line {lineno} of function {func}.'))
Any additional positional or keyword arguments passed to this function are
also used to format the message.
.. note::
This uses `sys.exc_info` to gather up the information needed to fill
in the formatting arguments. Since `sys.exc_info` is not carried
outside a handled exception, it's not wise to use this
outside of an ``except`` clause - if it is, this will substitute
        '<unknown>' for the 4 formatting arguments.
"""
tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
if len(tb) > 0:
filename, lineno, func, text = tb[0]
else:
filename = lineno = func = text = '<unknown>'
return msg.format(*args, filename=filename, lineno=lineno, func=func,
text=text, **kwargs)
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
    This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
        # Any code using numpy.random at this indent level will act just as
        # it would have if it had been before the with statement, e.g.
        # whatever the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
optionally open that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults to
        the development version if you are not on a release; otherwise, the
        version you are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
"""
import webbrowser
import urllib.request
from zlib import decompress
if (not isinstance(obj, str) and
hasattr(obj, '__module__') and
hasattr(obj, '__name__')):
obj = obj.__module__ + '.' + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from astropy import version
if version.release:
version = 'v' + version.version
else:
version = 'dev'
if '://' in version:
if version.endswith('index.html'):
baseurl = version[:-10]
elif version.endswith('/'):
baseurl = version
else:
baseurl = version + '/'
elif version == 'dev' or version == 'latest':
baseurl = 'http://devdocs.astropy.org/'
else:
baseurl = 'http://docs.astropy.org/en/{vers}/'.format(vers=version)
if timeout is None:
uf = urllib.request.urlopen(baseurl + 'objects.inv')
else:
uf = urllib.request.urlopen(baseurl + 'objects.inv', timeout=timeout)
try:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b'\n', oldidx + 1)
headerlines.append(oiread[(oldidx+1):idx].decode('utf-8'))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if 'The remainder of this file is compressed using zlib' not in compr:
            raise ValueError('The file downloaded from {0} does not seem to '
                             'be the usual Sphinx objects.inv format. Maybe '
                             'it has changed?'.format(baseurl + 'objects.inv'))
compressed = oiread[(idx+1):]
finally:
uf.close()
decompressed = decompress(compressed).decode('utf-8')
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith('$'):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError('Could not find the docs for the object {obj}'.format(obj=obj))
elif openinbrowser:
webbrowser.open(resurl)
return resurl
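# Usage sketch (requires internet access unless ``version`` points at a
# local copy of the docs; the object name used here is just an example).
def _example_find_api_page():
    url = find_api_page('astropy.table.Table', openinbrowser=False)
    print(url)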
def signal_number_to_name(signum):
"""
Given an OS signal number, returns a signal name. If the signal
number is unknown, returns ``'UNKNOWN'``.
"""
# Since these numbers and names are platform specific, we use the
# builtin signal module and build a reverse mapping.
signal_to_name_map = dict((k, v) for v, k in signal.__dict__.items()
if v.startswith('SIG'))
return signal_to_name_map.get(signum, 'UNKNOWN')
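# Usage sketch: the mapping is rebuilt from the ``signal`` module on every
# call, so the results reflect whatever the current platform defines.
def _example_signal_names():
    print(signal_number_to_name(signal.SIGINT))  # typically 'SIGINT'
    print(signal_number_to_name(10 ** 6))        # 'UNKNOWN'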
if sys.platform == 'win32':
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b'.')
else:
is_dotted = name.startswith('.')
return is_dotted or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror,
followlinks=followlinks):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
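# Usage sketch: identical to ``os.walk`` except that dot-files and (on
# MS-Windows) files carrying the hidden attribute never show up in
# ``dirs`` or ``files``. ``top`` here is a hypothetical directory.
def _example_walk_skip_hidden(top):
    for root, dirs, files in walk_skip_hidden(top):
        for fname in files:
            print(os.path.join(root, fname))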
class JsonCustomEncoder(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj):
from astropy import units as u
import numpy as np
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
            if obj == u.dimensionless_unscaled:
                # Return directly; falling through to JSONEncoder.default
                # below would raise TypeError even for this plain string.
                return 'dimensionless_unit'
else:
return obj.to_string()
return json.JSONEncoder.default(self, obj)
def strip_accents(s):
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None):
"""
When a string isn't found in a set of candidates, we can be nice
to provide a list of alternatives in the exception. This
convenience function helps to format that part of the exception.
Parameters
----------
s : str
candidates : sequence of str or dict of str keys
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to word are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a sequence of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
"""
if isinstance(s, str):
s = strip_accents(s)
s_lower = s.lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = {}
for candidate in candidates:
candidate_lower = candidate.lower()
candidates_lower.setdefault(candidate_lower, [])
candidates_lower[candidate_lower].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
if s_lower.endswith('s') and s_lower[:-1] in candidates_lower:
matches = [s_lower[:-1]]
else:
matches = difflib.get_close_matches(
s_lower, candidates_lower, n=n, cutoff=cutoff)
if len(matches):
capitalized_matches = set()
for match in matches:
capitalized_matches.update(candidates_lower[match])
matches = capitalized_matches
if fix is not None:
mapped_matches = []
for match in matches:
mapped_matches.extend(fix(match))
matches = mapped_matches
matches = list(set(matches))
matches = sorted(matches)
if len(matches) == 1:
matches = matches[0]
else:
matches = (', '.join(matches[:-1]) + ' or ' +
matches[-1])
return 'Did you mean {0}?'.format(matches)
return ''
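# Usage sketch: a typical call site folds the hint into an exception
# message (the candidate set below is hypothetical).
def _example_did_you_mean(name):
    candidates = {'mercury', 'venus', 'earth'}
    if name not in candidates:
        raise KeyError('Unknown body {0!r}. {1}'.format(
            name, did_you_mean(name, candidates)))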
class InheritDocstrings(type):
"""
This metaclass makes methods of a class automatically have their
docstrings filled in from the methods they override in the base
class.
If the class uses multiple inheritance, the docstring will be
chosen from the first class in the bases list, in the same way as
methods are normally resolved in Python. If this results in
selecting the wrong docstring, the docstring will need to be
explicitly included on the method.
For example::
>>> from astropy.utils.misc import InheritDocstrings
>>> class A(metaclass=InheritDocstrings):
... def wiggle(self):
... "Wiggle the thingamajig"
... pass
>>> class B(A):
... def wiggle(self):
... pass
>>> B.wiggle.__doc__
u'Wiggle the thingamajig'
"""
def __init__(cls, name, bases, dct):
def is_public_member(key):
return (
(key.startswith('__') and key.endswith('__')
and len(key) > 4) or
not key.startswith('_'))
for key, val in dct.items():
if ((inspect.isfunction(val) or inspect.isdatadescriptor(val)) and
is_public_member(key) and
val.__doc__ is None):
for base in cls.__mro__[1:]:
super_method = getattr(base, key, None)
if super_method is not None:
val.__doc__ = super_method.__doc__
break
super().__init__(name, bases, dct)
class OrderedDescriptor(metaclass=abc.ABCMeta):
"""
Base class for descriptors whose order in the class body should be
preserved. Intended for use in concert with the
`OrderedDescriptorContainer` metaclass.
Subclasses of `OrderedDescriptor` must define a value for a class attribute
called ``_class_attribute_``. This is the name of a class attribute on the
*container* class for these descriptors, which will be set to an
`~collections.OrderedDict` at class creation time. This
`~collections.OrderedDict` will contain a mapping of all class attributes
that were assigned instances of the `OrderedDescriptor` subclass, to the
instances themselves. See the documentation for
`OrderedDescriptorContainer` for a concrete example.
Optionally, subclasses of `OrderedDescriptor` may define a value for a
class attribute called ``_name_attribute_``. This should be the name of
an attribute on instances of the subclass. When specified, during
creation of a class containing these descriptors, the name attribute on
each instance will be set to the name of the class attribute it was
assigned to on the class.
.. note::
Although this class is intended for use with *descriptors* (i.e.
classes that define any of the ``__get__``, ``__set__``, or
``__delete__`` magic methods), this base class is not itself a
descriptor, and technically this could be used for classes that are
not descriptors too. However, use with descriptors is the original
intended purpose.
"""
# This id increments for each OrderedDescriptor instance created, so they
# are always ordered in the order they were created. Class bodies are
# guaranteed to be executed from top to bottom. Not sure if this is
# thread-safe though.
_nextid = 1
@property
@abc.abstractmethod
def _class_attribute_(self):
"""
Subclasses should define this attribute to the name of an attribute on
classes containing this subclass. That attribute will contain the mapping
of all instances of that `OrderedDescriptor` subclass defined in the class
body. If the same descriptor needs to be used with different classes,
each with different names of this attribute, multiple subclasses will be
needed.
"""
_name_attribute_ = None
"""
Subclasses may optionally define this attribute to specify the name of an
attribute on instances of the class that should be filled with the
instance's attribute name at class creation time.
"""
def __init__(self, *args, **kwargs):
# The _nextid attribute is shared across all subclasses so that
# different subclasses of OrderedDescriptors can be sorted correctly
# between themselves
self.__order = OrderedDescriptor._nextid
OrderedDescriptor._nextid += 1
super().__init__()
def __lt__(self, other):
"""
Defined for convenient sorting of `OrderedDescriptor` instances, which
are defined to sort in their creation order.
"""
if (isinstance(self, OrderedDescriptor) and
isinstance(other, OrderedDescriptor)):
try:
return self.__order < other.__order
except AttributeError:
raise RuntimeError(
'Could not determine ordering for {0} and {1}; at least '
'one of them is not calling super().__init__ in its '
'__init__.'.format(self, other))
else:
return NotImplemented
class OrderedDescriptorContainer(type):
"""
Classes should use this metaclass if they wish to use `OrderedDescriptor`
attributes, which are class attributes that "remember" the order in which
they were defined in the class body.
Every subclass of `OrderedDescriptor` has an attribute called
``_class_attribute_``. For example, if we have
.. code:: python
class ExampleDecorator(OrderedDescriptor):
_class_attribute_ = '_examples_'
Then when a class with the `OrderedDescriptorContainer` metaclass is
created, it will automatically be assigned a class attribute ``_examples_``
referencing an `~collections.OrderedDict` containing all instances of
``ExampleDecorator`` defined in the class body, mapped to by the names of
the attributes they were assigned to.
When subclassing a class with this metaclass, the descriptor dict (i.e.
``_examples_`` in the above example) will *not* contain descriptors
inherited from the base class. That is, this only works by default with
    descriptors explicitly defined in the class body. However, the subclass
    *may* define an attribute ``_inherit_descriptors_`` which lists
`OrderedDescriptor` classes that *should* be added from base classes.
See the examples section below for an example of this.
Examples
--------
>>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer
>>> class TypedAttribute(OrderedDescriptor):
... \"\"\"
... Attributes that may only be assigned objects of a specific type,
... or subclasses thereof. For some reason we care about their order.
... \"\"\"
...
... _class_attribute_ = 'typed_attributes'
... _name_attribute_ = 'name'
... # A default name so that instances not attached to a class can
... # still be repr'd; useful for debugging
... name = '<unbound>'
...
... def __init__(self, type):
... # Make sure not to forget to call the super __init__
... super().__init__()
... self.type = type
...
... def __get__(self, obj, objtype=None):
... if obj is None:
... return self
... if self.name in obj.__dict__:
... return obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __set__(self, obj, value):
... if not isinstance(value, self.type):
... raise ValueError('{0}.{1} must be of type {2!r}'.format(
... obj.__class__.__name__, self.name, self.type))
... obj.__dict__[self.name] = value
...
... def __delete__(self, obj):
... if self.name in obj.__dict__:
... del obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __repr__(self):
... if isinstance(self.type, tuple) and len(self.type) > 1:
... typestr = '({0})'.format(
... ', '.join(t.__name__ for t in self.type))
... else:
... typestr = self.type.__name__
... return '<{0}(name={1}, type={2})>'.format(
... self.__class__.__name__, self.name, typestr)
...
Now let's create an example class that uses this ``TypedAttribute``::
>>> class Point2D(metaclass=OrderedDescriptorContainer):
... x = TypedAttribute((float, int))
... y = TypedAttribute((float, int))
...
... def __init__(self, x, y):
... self.x, self.y = x, y
...
>>> p1 = Point2D(1.0, 2.0)
>>> p1.x
1.0
>>> p1.y
2.0
>>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: Point2D.x must be of type (float, int)
We see that ``TypedAttribute`` works more or less as advertised, but
there's nothing special about that. Let's see what
`OrderedDescriptorContainer` did for us::
>>> Point2D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>)])
If we create a subclass, it does *not* by default add inherited descriptors
to ``typed_attributes``::
>>> class Point3D(Point2D):
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)])
However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then
it will do so::
>>> class Point3D(Point2D):
... _inherit_descriptors_ = (TypedAttribute,)
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>),
('z', <TypedAttribute(name=z, type=(float, int))>)])
.. note::
Hopefully it is clear from these examples that this construction
also allows a class of type `OrderedDescriptorContainer` to use
multiple different `OrderedDescriptor` classes simultaneously.
"""
_inherit_descriptors_ = ()
def __init__(cls, cls_name, bases, members):
descriptors = defaultdict(list)
seen = set()
inherit_descriptors = ()
descr_bases = {}
for mro_cls in cls.__mro__:
for name, obj in mro_cls.__dict__.items():
if name in seen:
# Checks if we've already seen an attribute of the given
# name (if so it will override anything of the same name in
# any base class)
continue
seen.add(name)
if (not isinstance(obj, OrderedDescriptor) or
(inherit_descriptors and
not isinstance(obj, inherit_descriptors))):
# The second condition applies when checking any
# subclasses, to see if we can inherit any descriptors of
# the given type from subclasses (by default inheritance is
# disabled unless the class has _inherit_descriptors_
# defined)
continue
if obj._name_attribute_ is not None:
setattr(obj, obj._name_attribute_, name)
# Don't just use the descriptor's class directly; instead go
# through its MRO and find the class on which _class_attribute_
# is defined directly. This way subclasses of some
# OrderedDescriptor *may* override _class_attribute_ and have
# its own _class_attribute_, but by default all subclasses of
# some OrderedDescriptor are still grouped together
# TODO: It might be worth clarifying this in the docs
if obj.__class__ not in descr_bases:
for obj_cls_base in obj.__class__.__mro__:
if '_class_attribute_' in obj_cls_base.__dict__:
descr_bases[obj.__class__] = obj_cls_base
descriptors[obj_cls_base].append((obj, name))
break
else:
# Make sure to put obj first for sorting purposes
obj_cls_base = descr_bases[obj.__class__]
descriptors[obj_cls_base].append((obj, name))
if not getattr(mro_cls, '_inherit_descriptors_', False):
# If _inherit_descriptors_ is undefined then we don't inherit
# any OrderedDescriptors from any of the base classes, and
# there's no reason to continue through the MRO
break
else:
inherit_descriptors = mro_cls._inherit_descriptors_
for descriptor_cls, instances in descriptors.items():
instances.sort()
instances = OrderedDict((key, value) for value, key in instances)
setattr(cls, descriptor_cls._class_attribute_, instances)
super().__init__(cls_name, bases, members)
LOCALE_LOCK = threading.Lock()
@contextmanager
def set_locale(name):
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
    Note that one cannot nest multiple set_locale() context managers: the
    inner call would block forever on the threading lock held by the outer
    one.
This code taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
Parameters
    ----------
name : str
Locale name, e.g. "C" or "fr_FR".
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
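# Usage sketch: force the "C" locale so that numeric formatting and parsing
# are independent of the user's locale settings (assumes the "C" locale is
# available, which it is on all common platforms).
def _example_set_locale():
    with set_locale('C'):
        assert locale.format_string('%g', 0.5) == '0.5'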
class ShapedLikeNDArray(metaclass=abc.ABCMeta):
"""Mixin class to provide shape-changing methods.
The class proper is assumed to have some underlying data, which are arrays
or array-like structures. It must define a ``shape`` property, which gives
the shape of those data, as well as an ``_apply`` method that creates a new
instance in which a `~numpy.ndarray` method has been applied to those.
Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
define a setter for the ``shape`` property, which, like the
`~numpy.ndarray.shape` property allows in-place reshaping the internal data
(and, unlike the ``reshape`` method raises an exception if this is not
possible).
This class also defines default implementations for ``ndim`` and ``size``
properties, calculating those from the ``shape``. These can be overridden
by subclasses if there are faster ways to obtain those numbers.
"""
# Note to developers: if new methods are added here, be sure to check that
# they work properly with the classes that use this, such as Time and
# BaseRepresentation, i.e., look at their ``_apply`` methods and add
# relevant tests. This is particularly important for methods that imply
# copies rather than views of data (see the special-case treatment of
# 'flatten' in Time).
@property
@abc.abstractmethod
def shape(self):
"""The shape of the instance and underlying arrays."""
@abc.abstractmethod
    def _apply(self, method, *args, **kwargs):
"""Create a new instance, with ``method`` applied to underlying data.
The method is any of the shape-changing methods for `~numpy.ndarray`
(``reshape``, ``swapaxes``, etc.), as well as those picking particular
elements (``__getitem__``, ``take``, etc.). It will be applied to the
underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`),
with the results used to create a new instance.
Parameters
----------
method : str
Method to be applied to the instance's internal data arrays.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
@property
def ndim(self):
"""The number of dimensions of the instance and underlying arrays."""
return len(self.shape)
@property
def size(self):
"""The size of the object, as calculated from its shape."""
size = 1
for sh in self.shape:
size *= sh
return size
@property
def isscalar(self):
return self.shape == ()
def __len__(self):
if self.isscalar:
raise TypeError("Scalar {0!r} object has no len()"
.format(self.__class__.__name__))
return self.shape[0]
def __bool__(self):
"""Any instance should evaluate to True, except when it is empty."""
return self.size > 0
def __getitem__(self, item):
try:
return self._apply('__getitem__', item)
except IndexError:
if self.isscalar:
raise TypeError('scalar {0!r} object is not subscriptable.'
.format(self.__class__.__name__))
else:
raise
def __iter__(self):
if self.isscalar:
raise TypeError('scalar {0!r} object is not iterable.'
.format(self.__class__.__name__))
# We cannot just write a generator here, since then the above error
# would only be raised once we try to use the iterator, rather than
# upon its definition using iter(self).
def self_iter():
for idx in range(len(self)):
yield self[idx]
return self_iter()
def copy(self, *args, **kwargs):
"""Return an instance containing copies of the internal data.
Parameters are as for :meth:`~numpy.ndarray.copy`.
"""
return self._apply('copy', *args, **kwargs)
def reshape(self, *args, **kwargs):
"""Returns an instance containing the same data with a new shape.
Parameters are as for :meth:`~numpy.ndarray.reshape`. Note that it is
not always possible to change the shape of an array without copying the
data (see :func:`~numpy.reshape` documentation). If you want an error
        to be raised if the data is copied, you should assign the new shape to
the shape attribute (note: this may not be implemented for all classes
using ``ShapedLikeNDArray``).
"""
return self._apply('reshape', *args, **kwargs)
def ravel(self, *args, **kwargs):
"""Return an instance with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.ravel`. Note that it is
not always possible to unravel an array without copying the data.
        If you want an error to be raised if the data is copied, you should
        assign shape ``(-1,)`` to the shape attribute.
"""
return self._apply('ravel', *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Return a copy with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.flatten`.
"""
return self._apply('flatten', *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Return an instance with the data transposed.
Parameters are as for :meth:`~numpy.ndarray.transpose`. All internal
data are views of the data of the original.
"""
return self._apply('transpose', *args, **kwargs)
@property
def T(self):
"""Return an instance with the data transposed.
Parameters are as for :attr:`~numpy.ndarray.T`. All internal
data are views of the data of the original.
"""
if self.ndim < 2:
return self
else:
return self.transpose()
def swapaxes(self, *args, **kwargs):
"""Return an instance with the given axes interchanged.
Parameters are as for :meth:`~numpy.ndarray.swapaxes`:
``axis1, axis2``. All internal data are views of the data of the
original.
"""
return self._apply('swapaxes', *args, **kwargs)
def diagonal(self, *args, **kwargs):
"""Return an instance with the specified diagonals.
Parameters are as for :meth:`~numpy.ndarray.diagonal`. All internal
data are views of the data of the original.
"""
return self._apply('diagonal', *args, **kwargs)
def squeeze(self, *args, **kwargs):
"""Return an instance with single-dimensional shape entries removed
Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal
data are views of the data of the original.
"""
return self._apply('squeeze', *args, **kwargs)
def take(self, indices, axis=None, mode='raise'):
"""Return a new instance formed from the elements at the given indices.
Parameters are as for :meth:`~numpy.ndarray.take`, except that,
obviously, no output array can be given.
"""
return self._apply('take', indices, axis=axis, mode=mode)
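# A minimal sketch of the contract described in the class docstring above:
# a wrapper around a single array only needs ``shape`` and ``_apply`` for
# every method of the mixin to work. This toy class is illustrative only,
# e.g. ``_ExampleShaped(range(6)).reshape(2, 3).shape == (2, 3)``.
class _ExampleShaped(ShapedLikeNDArray):
    def __init__(self, data):
        import numpy as np
        self._data = np.asarray(data)
    @property
    def shape(self):
        return self._data.shape
    def _apply(self, method, *args, **kwargs):
        # Apply the named ndarray method to the wrapped array and rewrap
        # the result in a new instance.
        return self.__class__(getattr(self._data, method)(*args, **kwargs))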
class IncompatibleShapeError(ValueError):
def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx):
super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx)
def check_broadcast(*shapes):
"""
Determines whether two or more Numpy arrays can be broadcast with each
other based on their shape tuple alone.
Parameters
----------
*shapes : tuple
All shapes to include in the comparison. If only one shape is given it
is passed through unmodified. If no shapes are given returns an empty
`tuple`.
Returns
-------
broadcast : `tuple`
If all shapes are mutually broadcastable, returns a tuple of the full
broadcast shape.
"""
if len(shapes) == 0:
return ()
elif len(shapes) == 1:
return shapes[0]
reversed_shapes = (reversed(shape) for shape in shapes)
full_shape = []
for dims in zip_longest(*reversed_shapes, fillvalue=1):
max_dim = 1
max_dim_idx = None
for idx, dim in enumerate(dims):
if dim == 1:
continue
if max_dim == 1:
# The first dimension of size greater than 1
max_dim = dim
max_dim_idx = idx
elif dim != max_dim:
raise IncompatibleShapeError(
shapes[max_dim_idx], max_dim_idx, shapes[idx], idx)
full_shape.append(max_dim)
return tuple(full_shape[::-1])
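# Usage sketch: shapes are compared right-to-left, exactly as in numpy
# broadcasting, and IncompatibleShapeError identifies the offending pair.
def _example_check_broadcast():
    assert check_broadcast((2, 3), (3,)) == (2, 3)
    assert check_broadcast((4, 1), (1, 5)) == (4, 5)
    try:
        check_broadcast((2, 3), (4, 3))
    except IncompatibleShapeError:
        pass  # the non-unit leading dimensions 2 and 4 conflict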
def dtype_bytes_or_chars(dtype):
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
        Bytes (for numeric types) or characters (for string types)
"""
match = re.search(r'(\d+)$', dtype.str)
out = int(match.group(1)) if match else None
return out
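# Usage sketch: the trailing integer of ``dtype.str`` is the itemsize in
# bytes for numeric types and the length in characters for string types.
def _example_dtype_bytes_or_chars():
    import numpy as np
    assert dtype_bytes_or_chars(np.dtype('<f8')) == 8  # 8 bytes
    assert dtype_bytes_or_chars(np.dtype('<U5')) == 5  # 5 characters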
def pizza(): # pragma: no cover
"""
Open browser loaded with pizza options near you.
*Disclaimers: Payments not included. Astropy is not
responsible for any liability from using this function.*
.. note:: Accuracy depends on your browser settings.
"""
import webbrowser
webbrowser.open('https://www.google.com/search?q=pizza+near+me')
|
db03c9c7a8fd1f343d02f45b5cfb58d6743bb725a25f718d39a94f77120b6335 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handle loading six package from system or from the bundled copy
"""
import imp
import warnings
from distutils.version import StrictVersion
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn('astropy.extern.six will be removed in 4.0, use the '
'six module directly if it is still needed',
AstropyDeprecationWarning)
_SIX_MIN_VERSION = StrictVersion('1.10.0')
# Update this to prevent Astropy from using its bundled copy of six
# (but only if some other version of at least _SIX_MIN_VERSION can
# be provided)
_SIX_SEARCH_PATH = ['astropy.extern.bundled.six', 'six']
def _find_module(name, path=None):
"""
Alternative to `imp.find_module` that can also search in subpackages.
"""
parts = name.split('.')
for part in parts:
if path is not None:
path = [path]
fh, path, descr = imp.find_module(part, path)
return fh, path, descr
def _import_six(search_path=_SIX_SEARCH_PATH):
for mod_name in search_path:
try:
mod_info = _find_module(mod_name)
except ImportError:
continue
mod = imp.load_module(__name__, *mod_info)
try:
if StrictVersion(mod.__version__) >= _SIX_MIN_VERSION:
break
except (AttributeError, ValueError):
# Attribute error if the six module isn't what it should be and
# doesn't have a .__version__; ValueError if the version string
# exists but is somehow bogus/unparseable
continue
else:
raise ImportError(
"Astropy requires the 'six' module of minimum version {0}; "
"normally this is bundled with the astropy package so if you get "
"this warning consult the packager of your Astropy "
"distribution.".format(_SIX_MIN_VERSION))
_import_six()
|
d5e902aa06eb0b1d87750fd2d0c8facb73026701ffd646ca7a754d92b2b6f986 | """
Normalization class for Matplotlib that can be used to produce
colorbars.
"""
import inspect
import numpy as np
from numpy import ma
from .interval import (PercentileInterval, AsymmetricPercentileInterval,
ManualInterval, MinMaxInterval, BaseInterval)
from .stretch import (LinearStretch, SqrtStretch, PowerStretch, LogStretch,
AsinhStretch, BaseStretch)
try:
import matplotlib # pylint: disable=W0611
from matplotlib.colors import Normalize
from matplotlib import pyplot as plt
except ImportError:
class Normalize:
def __init__(self, *args, **kwargs):
raise ImportError('matplotlib is required in order to use this '
'class.')
__all__ = ['ImageNormalize', 'simple_norm', 'imshow_norm']
__doctest_requires__ = {'*': ['matplotlib']}
class ImageNormalize(Normalize):
"""
Normalization class to be used with Matplotlib.
Parameters
----------
data : `~numpy.ndarray`, optional
The image array. This input is used only if ``interval`` is
also input. ``data`` and ``interval`` are used to compute the
vmin and/or vmax values only if ``vmin`` or ``vmax`` are not
input.
interval : `~astropy.visualization.BaseInterval` subclass instance, optional
The interval object to apply to the input ``data`` to determine
the ``vmin`` and ``vmax`` values. This input is used only if
``data`` is also input. ``data`` and ``interval`` are used to
compute the vmin and/or vmax values only if ``vmin`` or ``vmax``
are not input.
vmin, vmax : float
The minimum and maximum levels to show for the data. The
``vmin`` and ``vmax`` inputs override any calculated values from
the ``interval`` and ``data`` inputs.
stretch : `~astropy.visualization.BaseStretch` subclass instance, optional
The stretch object to apply to the data. The default is
`~astropy.visualization.LinearStretch`.
clip : bool, optional
If `True` (default), data values outside the [0:1] range are
clipped to the [0:1] range.
"""
def __init__(self, data=None, interval=None, vmin=None, vmax=None,
stretch=LinearStretch(), clip=True):
# this super call checks for matplotlib
super().__init__(vmin=vmin, vmax=vmax, clip=clip)
self.vmin = vmin
self.vmax = vmax
if data is not None and interval is not None:
_vmin, _vmax = interval.get_limits(data)
if self.vmin is None:
self.vmin = _vmin
if self.vmax is None:
self.vmax = _vmax
if stretch is not None and not isinstance(stretch, BaseStretch):
raise TypeError('stretch must be an instance of a BaseStretch '
'subclass')
self.stretch = stretch
if interval is not None and not isinstance(interval, BaseInterval):
raise TypeError('interval must be an instance of a BaseInterval '
'subclass')
self.interval = interval
self.inverse_stretch = stretch.inverse
self.clip = clip
def __call__(self, values, clip=None):
if clip is None:
clip = self.clip
if isinstance(values, ma.MaskedArray):
if clip:
mask = False
else:
mask = values.mask
values = values.filled(self.vmax)
else:
mask = False
# Make sure scalars get broadcast to 1-d
if np.isscalar(values):
values = np.array([values], dtype=float)
else:
# copy because of in-place operations after
values = np.array(values, copy=True, dtype=float)
# Set default values for vmin and vmax if not specified
self.autoscale_None(values)
# Normalize based on vmin and vmax
np.subtract(values, self.vmin, out=values)
np.true_divide(values, self.vmax - self.vmin, out=values)
        # Clip to the 0 to 1 range, honoring the per-call ``clip`` value
        # resolved at the top of this method
        if clip:
values = np.clip(values, 0., 1., out=values)
# Stretch values
values = self.stretch(values, out=values, clip=False)
# Convert to masked array for matplotlib
return ma.array(values, mask=mask)
def inverse(self, values):
# Find unstretched values in range 0 to 1
values_norm = self.inverse_stretch(values, clip=False)
# Scale to original range
return values_norm * (self.vmax - self.vmin) + self.vmin
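# Usage sketch: combine an interval (to pick vmin/vmax from the data) with
# a stretch and hand the result to imshow via the ``norm`` keyword
# (requires matplotlib; the data array below is hypothetical).
def _example_image_normalize():
    data = np.random.normal(size=(64, 64))
    norm = ImageNormalize(data, interval=PercentileInterval(99.),
                          stretch=SqrtStretch())
    plt.imshow(data, norm=norm, origin='lower')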
def simple_norm(data, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None,
max_cut=None, min_percent=None, max_percent=None,
percent=None, clip=True, log_a=1000):
"""
Return a Normalization class that can be used for displaying images
with Matplotlib.
This function enables only a subset of image stretching functions
available in `~astropy.visualization.mpl_normalize.ImageNormalize`.
This function is used by the
``astropy.visualization.scripts.fits2bitmap`` script.
Parameters
----------
data : `~numpy.ndarray`
The image array.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}, optional
The stretch function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
    log_a : float, optional
        The log index for ``stretch='log'``. The default is 1000.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
The default is the image minimum. ``min_cut`` overrides
``min_percent``.
max_cut : float, optional
The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
clip : bool, optional
If `True` (default), data values outside the [0:1] range are
clipped to the [0:1] range.
Returns
-------
result : `ImageNormalize` instance
An `ImageNormalize` instance that can be used for displaying
images with Matplotlib.
"""
if percent is not None:
interval = PercentileInterval(percent)
elif min_percent is not None or max_percent is not None:
interval = AsymmetricPercentileInterval(min_percent or 0.,
max_percent or 100.)
elif min_cut is not None or max_cut is not None:
interval = ManualInterval(min_cut, max_cut)
else:
interval = MinMaxInterval()
if stretch == 'linear':
stretch = LinearStretch()
elif stretch == 'sqrt':
stretch = SqrtStretch()
elif stretch == 'power':
stretch = PowerStretch(power)
elif stretch == 'log':
stretch = LogStretch(log_a)
elif stretch == 'asinh':
stretch = AsinhStretch(asinh_a)
else:
raise ValueError('Unknown stretch: {0}.'.format(stretch))
vmin, vmax = interval.get_limits(data)
return ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch, clip=clip)
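# Usage sketch: ``simple_norm`` is a one-call convenience wrapper for the
# interval + stretch combinations above (hypothetical data array).
def _example_simple_norm():
    data = np.arange(100.).reshape(10, 10)
    norm = simple_norm(data, stretch='sqrt', percent=99.)
    plt.imshow(data, norm=norm, origin='lower')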
# used in imshow_norm
_norm_sig = inspect.signature(ImageNormalize)
def imshow_norm(data, ax=None, imshow_only_kwargs={}, **kwargs):
""" A convenience function to call matplotlib's `matplotlib.pyplot.imshow`
function, using an `ImageNormalize` object as the normalization.
Parameters
----------
data : 2D or 3D array-like - see `~matplotlib.pyplot.imshow`
The data to show. Can be whatever `~matplotlib.pyplot.imshow` and
`ImageNormalize` both accept.
ax : None or `~matplotlib.axes.Axes`
If None, use pyplot's imshow. Otherwise, calls ``imshow`` method of the
supplied axes.
imshow_only_kwargs : dict
Arguments to be passed directly to `~matplotlib.pyplot.imshow` without
first trying `ImageNormalize`. This is only for keywords that have the
same name in both `ImageNormalize` and `~matplotlib.pyplot.imshow` - if
you want to set the `~matplotlib.pyplot.imshow` keywords only, supply
them in this dictionary.
All other keyword arguments are parsed first by the `ImageNormalize`
    initializer, then passed to `~matplotlib.pyplot.imshow`.
Notes
-----
The ``norm`` matplotlib keyword is not supported.
"""
if 'X' in kwargs:
raise ValueError('Cannot give both ``X`` and ``data``')
if 'norm' in kwargs:
raise ValueError('There is no point in using imshow_norm if you give '
'the ``norm`` keyword - use imshow directly if you '
'want that.')
imshow_kwargs = dict(kwargs)
norm_kwargs = {'data': data}
for pname in _norm_sig.parameters:
if pname in kwargs:
norm_kwargs[pname] = imshow_kwargs.pop(pname)
for k, v in imshow_only_kwargs.items():
if k not in _norm_sig.parameters:
# the below is not strictly "has to be true", but is here so that
# users don't start using both imshow_only_kwargs *and* keyword
# arguments to this function, as that makes for more confusing
# user code
            raise ValueError('Provided a keyword to imshow_only_kwargs ({}) '
                             'that is not a keyword for ImageNormalize. This '
                             'is not supported; you should pass the keyword '
                             'directly to imshow_norm instead.'.format(k))
imshow_kwargs[k] = v
imshow_kwargs['norm'] = ImageNormalize(**norm_kwargs)
if ax is None:
imshow_result = plt.imshow(data, **imshow_kwargs)
else:
imshow_result = ax.imshow(data, **imshow_kwargs)
return imshow_result, imshow_kwargs['norm']
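# Usage sketch: one call builds the ImageNormalize and draws the image,
# returning both the image artist and the norm (hypothetical data array).
def _example_imshow_norm():
    data = np.random.random((16, 16))
    im, norm = imshow_norm(data, interval=MinMaxInterval(),
                           stretch=SqrtStretch())
    plt.colorbar(im)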
|
c4eb779f45bac686fd8d8f0c5a5904dce173ddd9aaa9bed8cde03a71796a2986 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004).
The three images must be aligned and have the same pixel scale and size.
For details, see : http://adsabs.harvard.edu/abs/2004PASP..116..133L
"""
import numpy as np
from . import ZScaleInterval
__all__ = ['make_lupton_rgb']
def compute_intensity(image_r, image_g=None, image_b=None):
"""
Return a naive total intensity from the red, blue, and green intensities.
Parameters
----------
image_r : `~numpy.ndarray`
Intensity of image to be mapped to red; or total intensity if ``image_g``
and ``image_b`` are None.
image_g : `~numpy.ndarray`, optional
Intensity of image to be mapped to green.
image_b : `~numpy.ndarray`, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : `~numpy.ndarray`
Total intensity from the red, blue and green intensities, or ``image_r``
if green and blue images are not provided.
"""
if image_g is None or image_b is None:
if not (image_g is None and image_b is None):
raise ValueError("please specify either a single image "
"or red, green, and blue images.")
return image_r
intensity = (image_r + image_g + image_b)/3.0
# Repack into whatever type was passed to us
return np.asarray(intensity, dtype=image_r.dtype)
class Mapping:
"""
Baseclass to map red, blue, green intensities into uint8 values.
Parameters
----------
minimum : float or sequence(3)
Intensity that should be mapped to black (a scalar or array for R, G, B).
image : `~numpy.ndarray`, optional
An image used to calculate some parameters of some mappings.
"""
def __init__(self, minimum=None, image=None):
self._uint8Max = float(np.iinfo(np.uint8).max)
try:
len(minimum)
except TypeError:
minimum = 3*[minimum]
if len(minimum) != 3:
raise ValueError("please provide 1 or 3 values for minimum.")
self.minimum = minimum
self._image = np.asarray(image)
def make_rgb_image(self, image_r, image_g, image_b):
"""
Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image.
Parameters
----------
image_r : `~numpy.ndarray`
Image to map to red.
image_g : `~numpy.ndarray`
Image to map to green.
image_b : `~numpy.ndarray`
Image to map to blue.
Returns
-------
RGBimage : `~numpy.ndarray`
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
"""
image_r = np.asarray(image_r)
image_g = np.asarray(image_g)
image_b = np.asarray(image_b)
if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape):
msg = "The image shapes must match. r: {}, g: {} b: {}"
raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape))
return np.dstack(self._convert_images_to_uint8(image_r, image_g, image_b)).astype(np.uint8)
def intensity(self, image_r, image_g, image_b):
"""
Return the total intensity from the red, blue, and green intensities.
This is a naive computation, and may be overridden by subclasses.
Parameters
----------
image_r : `~numpy.ndarray`
Intensity of image to be mapped to red; or total intensity if
``image_g`` and ``image_b`` are None.
image_g : `~numpy.ndarray`, optional
Intensity of image to be mapped to green.
image_b : `~numpy.ndarray`, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : `~numpy.ndarray`
Total intensity from the red, blue and green intensities, or
``image_r`` if green and blue images are not provided.
"""
return compute_intensity(image_r, image_g, image_b)
def map_intensity_to_uint8(self, I):
"""
Return an array which, when multiplied by an image, returns that image
mapped to the range of a uint8, [0, 255] (but not converted to uint8).
The intensity is assumed to have had minimum subtracted (as that can be
done per-band).
Parameters
----------
I : `~numpy.ndarray`
Intensity to be mapped.
Returns
-------
mapped_I : `~numpy.ndarray`
``I`` mapped to uint8
"""
with np.errstate(invalid='ignore', divide='ignore'):
return np.clip(I, 0, self._uint8Max)
def _convert_images_to_uint8(self, image_r, image_g, image_b):
"""Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images"""
image_r = image_r - self.minimum[0] # n.b. makes copy
image_g = image_g - self.minimum[1]
image_b = image_b - self.minimum[2]
fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b))
image_rgb = [image_r, image_g, image_b]
for c in image_rgb:
c *= fac
with np.errstate(invalid='ignore'):
c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't
pixmax = self._uint8Max
r0, g0, b0 = image_rgb # copies -- could work row by row to minimise memory usage
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
for i, c in enumerate(image_rgb):
c = np.where(r0 > g0,
np.where(r0 > b0,
np.where(r0 >= pixmax, c*pixmax/r0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c)),
np.where(g0 > b0,
np.where(g0 >= pixmax, c*pixmax/g0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
c[c > pixmax] = pixmax
image_rgb[i] = c
return image_rgb
class LinearMapping(Mapping):
"""
    A linear map of red, blue, green intensities into uint8 values.
    A linear stretch from [minimum, maximum]. If one or both are omitted,
    the image minimum and/or maximum are used to set them.
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
maximum : float
Intensity that should be mapped to white (a scalar).
"""
def __init__(self, minimum=None, maximum=None, image=None):
if minimum is None or maximum is None:
if image is None:
raise ValueError("you must provide an image if you don't "
"set both minimum and maximum")
if minimum is None:
minimum = image.min()
if maximum is None:
maximum = image.max()
Mapping.__init__(self, minimum=minimum, image=image)
self.maximum = maximum
if maximum is None:
self._range = None
else:
if maximum == minimum:
raise ValueError("minimum and maximum values must not be equal")
self._range = float(maximum - minimum)
def map_intensity_to_uint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0,
np.where(I >= self._range, self._uint8Max/I, self._uint8Max/self._range))
class AsinhMapping(Mapping):
"""
A mapping for an asinh stretch (preserving colours independent of brightness)
x = asinh(Q (I - minimum)/stretch)/Q
This reduces to a linear stretch if Q == 0
See http://adsabs.harvard.edu/abs/2004PASP..116..133L
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
"""
def __init__(self, minimum, stretch, Q=8):
Mapping.__init__(self, minimum)
epsilon = 1.0/2**23 # 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
        frac = 0.1  # the gradient of the mapping at intensity frac*stretch sets _slope
self._slope = frac*self._uint8Max/np.arcsinh(frac*Q)
self._soften = Q/float(stretch)
def map_intensity_to_uint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0, np.arcsinh(I*self._soften)*self._slope/I)
class AsinhZScaleMapping(AsinhMapping):
"""
A mapping for an asinh stretch, estimating the linear stretch by zscale.
x = asinh(Q (I - z1)/(z2 - z1))/Q
Parameters
----------
image1 : `~numpy.ndarray` or a list of arrays
The image to analyse, or a list of 3 images to be converted to
an intensity image.
image2 : `~numpy.ndarray`, optional
the second image to analyse (must be specified with image3).
image3 : `~numpy.ndarray`, optional
the third image to analyse (must be specified with image2).
Q : float, optional
The asinh softening parameter. Default is 8.
pedestal : float or sequence(3), optional
The value, or array of 3 values, to subtract from the images; or None.
Notes
-----
pedestal, if not None, is removed from the images when calculating the
zscale stretch, and added back into Mapping.minimum[]
"""
def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
"""
"""
if image2 is None or image3 is None:
if not (image2 is None and image3 is None):
raise ValueError("please specify either a single image "
"or three images.")
image = [image1]
else:
image = [image1, image2, image3]
if pedestal is not None:
try:
len(pedestal)
except TypeError:
pedestal = 3*[pedestal]
if len(pedestal) != 3:
raise ValueError("please provide 1 or 3 pedestals.")
image = list(image) # needs to be mutable
for i, im in enumerate(image):
if pedestal[i] != 0.0:
image[i] = im - pedestal[i] # n.b. a copy
else:
pedestal = len(image)*[0.0]
image = compute_intensity(*image)
zscale_limits = ZScaleInterval().get_limits(image)
zscale = LinearMapping(*zscale_limits, image=image)
stretch = zscale.maximum - zscale.minimum[0] # zscale.minimum is always a triple
minimum = zscale.minimum
for i, level in enumerate(pedestal):
minimum[i] += level
AsinhMapping.__init__(self, minimum, stretch, Q)
self._image = image
def make_lupton_rgb(image_r, image_g, image_b, minimum=0, stretch=5, Q=8,
filename=None):
"""
Return a Red/Green/Blue color image from up to 3 images using an asinh stretch.
The input images can be int or float, and in any range or bit-depth.
For a more detailed look at the use of this method, see the document
:ref:`astropy-visualization-rgb`.
Parameters
----------
image_r : `~numpy.ndarray`
Image to map to red.
image_g : `~numpy.ndarray`
Image to map to green.
image_b : `~numpy.ndarray`
Image to map to blue.
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
    filename : str, optional
Write the resulting RGB image to a file (file type determined
from extension).
Returns
-------
rgb : `~numpy.ndarray`
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
"""
asinhMap = AsinhMapping(minimum, stretch, Q)
rgb = asinhMap.make_rgb_image(image_r, image_g, image_b)
if filename:
import matplotlib.image
matplotlib.image.imsave(filename, rgb, origin='lower')
return rgb
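# Usage sketch: three aligned images in, one (N, M, 3) uint8 array out,
# ready for ``plt.imshow`` or for saving via ``filename`` (the inputs
# below are hypothetical random images).
def _example_make_lupton_rgb():
    rng = np.random.RandomState(0)
    r, g, b = (rng.random_sample((32, 32)) for _ in range(3))
    rgb = make_lupton_rgb(r, g, b, stretch=0.5, Q=10)
    assert rgb.shape == (32, 32, 3) and rgb.dtype == np.uint8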
|
20612cc4f14e7715a876a4ae831dd7d9dbb55893931e02d3bda7eb568579a2fe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes that deal with computing intervals from arrays of values based on
various criteria.
"""
import abc
import numpy as np
from astropy.utils.misc import InheritDocstrings
from .transform import BaseTransform
__all__ = ['BaseInterval', 'ManualInterval', 'MinMaxInterval',
'AsymmetricPercentileInterval', 'PercentileInterval',
'ZScaleInterval']
class BaseInterval(BaseTransform, metaclass=InheritDocstrings):
"""
Base class for the interval classes, which, when called with an
array of values, return an interval computed following different
algorithms.
"""
@abc.abstractmethod
def get_limits(self, values):
"""
Return the minimum and maximum value in the interval based on
the values provided.
Parameters
----------
values : `~numpy.ndarray`
The image values.
Returns
-------
vmin, vmax : float
            The minimum and maximum image value in the interval.
"""
def __call__(self, values, clip=True, out=None):
"""
Transform values using this interval.
Parameters
----------
values : array-like
The input values.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : `~numpy.ndarray`, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
Returns
-------
result : `~numpy.ndarray`
The transformed values.
"""
vmin, vmax = self.get_limits(values)
if out is None:
values = np.subtract(values, float(vmin))
else:
if out.dtype.kind != 'f':
raise TypeError('Can only do in-place scaling for '
'floating-point arrays')
values = np.subtract(values, float(vmin), out=out)
if (vmax - vmin) != 0:
np.true_divide(values, vmax - vmin, out=values)
if clip:
np.clip(values, 0., 1., out=values)
return values
class ManualInterval(BaseInterval):
"""
Interval based on user-specified values.
Parameters
----------
vmin : float, optional
The minimum value in the scaling. Defaults to the image
minimum (ignoring NaNs)
vmax : float, optional
The maximum value in the scaling. Defaults to the image
maximum (ignoring NaNs)
"""
def __init__(self, vmin=None, vmax=None):
self.vmin = vmin
self.vmax = vmax
def get_limits(self, values):
vmin = np.nanmin(values) if self.vmin is None else self.vmin
vmax = np.nanmax(values) if self.vmax is None else self.vmax
return vmin, vmax
class MinMaxInterval(BaseInterval):
"""
Interval based on the minimum and maximum values in the data.
"""
def get_limits(self, values):
return np.nanmin(values), np.nanmax(values)
class AsymmetricPercentileInterval(BaseInterval):
"""
    Interval based on keeping a specified fraction of pixels (can be
asymmetric).
Parameters
----------
lower_percentile : float
The lower percentile below which to ignore pixels.
upper_percentile : float
The upper percentile above which to ignore pixels.
n_samples : int, optional
Maximum number of values to use. If this is specified, and there
        are more values in the dataset than this, then values are randomly
sampled from the array (with replacement).
"""
def __init__(self, lower_percentile, upper_percentile, n_samples=None):
self.lower_percentile = lower_percentile
self.upper_percentile = upper_percentile
self.n_samples = n_samples
def get_limits(self, values):
# Make sure values is a Numpy array
values = np.asarray(values).ravel()
# If needed, limit the number of samples. We sample with replacement
# since this is much faster.
if self.n_samples is not None and values.size > self.n_samples:
values = np.random.choice(values, self.n_samples)
# Filter out invalid values (inf, nan)
values = values[np.isfinite(values)]
# Determine values at percentiles
vmin, vmax = np.nanpercentile(values,
(self.lower_percentile,
self.upper_percentile))
return vmin, vmax
class PercentileInterval(AsymmetricPercentileInterval):
"""
    Interval based on keeping a specified fraction of pixels.
Parameters
----------
percentile : float
The fraction of pixels to keep. The same fraction of pixels is
eliminated from both ends.
n_samples : int, optional
Maximum number of values to use. If this is specified, and there
        are more values in the dataset than this, then values are randomly
sampled from the array (with replacement).
"""
def __init__(self, percentile, n_samples=None):
lower_percentile = (100 - percentile) * 0.5
upper_percentile = 100 - lower_percentile
super().__init__(
lower_percentile, upper_percentile, n_samples=n_samples)
class ZScaleInterval(BaseInterval):
"""
Interval based on IRAF's zscale.
http://iraf.net/forum/viewtopic.php?showtopic=134139
Original implementation:
https://trac.stsci.edu/ssb/stsci_python/browser/stsci_python/trunk/numdisplay/lib/stsci/numdisplay/zscale.py?rev=19347
Licensed under a 3-clause BSD style license (see AURA_LICENSE.rst).
Parameters
----------
nsamples : int, optional
The number of points in the array to sample for determining
scaling factors. Defaults to 1000.
contrast : float, optional
The scaling factor (between 0 and 1) for determining the minimum
and maximum value. Larger values increase the difference
between the minimum and maximum values used for display.
Defaults to 0.25.
max_reject : float, optional
If more than ``max_reject * npixels`` pixels are rejected, then
the returned values are the minimum and maximum of the data.
Defaults to 0.5.
min_npixels : int, optional
        If there are fewer than ``min_npixels`` pixels remaining after
        the pixel rejection, then the returned values are the minimum
        and maximum of the data.
Defaults to 5.
krej : float, optional
The number of sigma used for the rejection. Defaults to 2.5.
max_iterations : int, optional
The maximum number of iterations for the rejection. Defaults to
5.
"""
def __init__(self, nsamples=1000, contrast=0.25, max_reject=0.5,
min_npixels=5, krej=2.5, max_iterations=5):
self.nsamples = nsamples
self.contrast = contrast
self.max_reject = max_reject
self.min_npixels = min_npixels
self.krej = krej
self.max_iterations = max_iterations
def get_limits(self, values):
# Sample the image
values = np.asarray(values)
values = values[np.isfinite(values)]
stride = int(max(1.0, values.size / self.nsamples))
samples = values[::stride][:self.nsamples]
samples.sort()
npix = len(samples)
vmin = samples[0]
vmax = samples[-1]
# Fit a line to the sorted array of samples
minpix = max(self.min_npixels, int(npix * self.max_reject))
x = np.arange(npix)
ngoodpix = npix
last_ngoodpix = npix + 1
# Bad pixels mask used in k-sigma clipping
badpix = np.zeros(npix, dtype=bool)
# Kernel used to dilate the bad pixels mask
ngrow = max(1, int(npix * 0.01))
kernel = np.ones(ngrow, dtype=bool)
for niter in range(self.max_iterations):
if ngoodpix >= last_ngoodpix or ngoodpix < minpix:
break
fit = np.polyfit(x, samples, deg=1, w=(~badpix).astype(int))
fitted = np.poly1d(fit)(x)
# Subtract fitted line from the data array
flat = samples - fitted
# Compute the k-sigma rejection threshold
threshold = self.krej * flat[~badpix].std()
# Detect and reject pixels further than k*sigma from the
# fitted line
badpix[(flat < - threshold) | (flat > threshold)] = True
# Convolve with a kernel of length ngrow
badpix = np.convolve(badpix, kernel, mode='same')
last_ngoodpix = ngoodpix
ngoodpix = np.sum(~badpix)
        if ngoodpix >= minpix:
            # ``fit`` is defined as long as the rejection loop ran; the
            # loop only breaks before the first fit when ngoodpix < minpix,
            # in which case this branch is not taken.
            slope, intercept = fit
if self.contrast > 0:
slope = slope / self.contrast
center_pixel = (npix - 1) // 2
median = np.median(samples)
vmin = max(vmin, median - (center_pixel - 1) * slope)
vmax = min(vmax, median + (npix - center_pixel) * slope)
return vmin, vmax
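# --- Illustrative usage sketch (not part of astropy) ------------------------
# Every interval maps data onto [0:1]: get_limits() picks (vmin, vmax) and
# __call__() rescales and clips. Run via ``python -m`` if this module uses
# relative imports.
if __name__ == '__main__':
    data = np.linspace(-5., 20., 1001)
    interval = PercentileInterval(95.)        # keep the central 95% of pixels
    vmin, vmax = interval.get_limits(data)
    scaled = interval(data)                   # normalized and clipped to [0:1]
    print(vmin, vmax, scaled.min(), scaled.max())
    print(ManualInterval(vmin=0., vmax=10.).get_limits(data))
    print(ZScaleInterval().get_limits(data))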
|
25dfc1ee4374e32df985a3948933c3db237bd10ab55e041b8a9578fdf02b8a80 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from astropy import units as u
from matplotlib import units
from matplotlib import ticker
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
            return '{0}π'.format(n // 2)  # floor division avoids '2.0π'
else:
return '{0}π/2'.format(n)
class MplQuantityConverter(units.ConversionInterface):
def __init__(self):
if u.Quantity not in units.registry:
units.registry[u.Quantity] = self
self._remove = True
else:
self._remove = False
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._remove:
del units.registry[u.Quantity]
return MplQuantityConverter()
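# --- Illustrative usage sketch (not part of astropy) ------------------------
# quantity_support() registers the converter; used as a context manager it
# unregisters on exit (when it did the registering). Requires matplotlib.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Agg')                     # headless backend for the demo
    import matplotlib.pyplot as plt
    from astropy import units as u
    with quantity_support():
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3] * u.m)              # the axis unit becomes 'm'
        ax.plot([100, 200, 300] * u.cm)       # converted to the axis unit
    fig.savefig('quantity_support_demo.png')  # hypothetical output file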
|
b20453743078a62587d4dc6031afc0ee576330b9132a755e3d649bfad8f46ab9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains dictionaries that can be used to set a matplotlib
# plotting style. It is no longer documented/recommended as of Astropy v3.0
# but is kept here for backward-compatibility.
from astropy.utils import minversion
# This returns False if matplotlib cannot be imported
MATPLOTLIB_GE_1_5 = minversion('matplotlib', '1.5')
__all__ = ['astropy_mpl_style_1', 'astropy_mpl_style']
# Version 1 astropy plotting style for matplotlib
astropy_mpl_style_1 = {
# Lines
'lines.linewidth': 1.7,
'lines.antialiased': True,
# Patches
'patch.linewidth': 1.0,
'patch.facecolor': '#348ABD',
'patch.edgecolor': '#CCCCCC',
'patch.antialiased': True,
# Images
'image.cmap': 'gist_heat',
'image.origin': 'upper',
# Font
'font.size': 12.0,
# Axes
'axes.facecolor': '#FFFFFF',
'axes.edgecolor': '#AAAAAA',
'axes.linewidth': 1.0,
'axes.grid': True,
'axes.titlesize': 'x-large',
'axes.labelsize': 'large',
'axes.labelcolor': 'k',
'axes.axisbelow': True,
# Ticks
'xtick.major.size': 0,
'xtick.minor.size': 0,
'xtick.major.pad': 6,
'xtick.minor.pad': 6,
'xtick.color': '#565656',
'xtick.direction': 'in',
'ytick.major.size': 0,
'ytick.minor.size': 0,
'ytick.major.pad': 6,
'ytick.minor.pad': 6,
'ytick.color': '#565656',
'ytick.direction': 'in',
# Legend
'legend.fancybox': True,
'legend.loc': 'best',
# Figure
'figure.figsize': [8, 6],
'figure.facecolor': '1.0',
'figure.edgecolor': '0.50',
'figure.subplot.hspace': 0.5,
# Other
'savefig.dpi': 72,
}
color_cycle = ['#348ABD', # blue
'#7A68A6', # purple
'#A60628', # red
'#467821', # green
'#CF4457', # pink
'#188487', # turquoise
'#E24A33'] # orange
if MATPLOTLIB_GE_1_5:
# This is a dependency of matplotlib, so should be present.
from cycler import cycler
astropy_mpl_style_1['axes.prop_cycle'] = cycler('color', color_cycle)
else:
astropy_mpl_style_1['axes.color_cycle'] = color_cycle
astropy_mpl_style = astropy_mpl_style_1
"""The most recent version of the astropy plotting style."""
|
f6b56d7f291ace9ea044df5ec6daf4d207ab53677ab27ea0ff2e8d535e8219fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.stats.histogram import calculate_bin_edges
__all__ = ['hist']
def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs):
"""Enhanced histogram function
This is a histogram function that enables the use of more sophisticated
    algorithms for determining bins. Aside from the ``bins`` argument, which
    additionally accepts a string specifying how bins are computed, the
    parameters are the same as ``pylab.hist()``.
This function was ported from astroML: http://astroML.org/
Parameters
----------
x : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
ax : Axes instance (optional)
specify the Axes on which to draw the histogram. If not specified,
then the current active axes will be used.
max_bins : int (optional)
Maximum number of bins allowed. With more than a few thousand bins
the performance of matplotlib will not be great. If the number of
        bins is large *and* the number of input data points is large, then
        it will take a very long time to compute the histogram.
**kwargs :
other keyword arguments are described in ``plt.hist()``.
Notes
-----
Return values are the same as for ``plt.hist()``
See Also
--------
astropy.stats.histogram
"""
# Note that we only calculate the bin edges...matplotlib will calculate
# the actual histogram.
range = kwargs.get('range', None)
weights = kwargs.get('weights', None)
bins = calculate_bin_edges(x, bins, range=range, weights=weights)
if len(bins) > max_bins:
raise ValueError('Histogram has too many bins: '
'{nbin}. Use max_bins to increase the number '
'of allowed bins or range to restrict '
'the histogram range.'.format(nbin=len(bins)))
if ax is None:
# optional dependency; only import if strictly needed.
import matplotlib.pyplot as plt
ax = plt.gca()
return ax.hist(x, bins, **kwargs)
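# --- Illustrative usage sketch (not part of astropy) ------------------------
# 'freedman' derives the bin width from the interquartile range of the data,
# so the binning adapts to the sample instead of being fixed at 10 bins.
if __name__ == '__main__':
    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    x = rng.normal(size=1000)
    counts, edges, patches = hist(x, bins='freedman')
    print(len(edges) - 1, 'bins chosen by the Freedman-Diaconis rule')
    plt.savefig('hist_demo.png')              # hypothetical output file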
|
abb693c394af8726a45061bad60d237c403a224cea498abef0fa77ad049d9aa9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['BaseTransform', 'CompositeTransform']
class BaseTransform:
"""
A transformation object.
This is used to construct transformations such as scaling, stretching, and
so on.
"""
def __add__(self, other):
return CompositeTransform(other, self)
class CompositeTransform(BaseTransform):
"""
A combination of two transforms.
Parameters
----------
transform_1 : :class:`astropy.visualization.BaseTransform`
The first transform to apply.
transform_2 : :class:`astropy.visualization.BaseTransform`
The second transform to apply.
"""
def __init__(self, transform_1, transform_2):
super().__init__()
self.transform_1 = transform_1
self.transform_2 = transform_2
def __call__(self, values, clip=True):
return self.transform_2(self.transform_1(values, clip=clip), clip=clip)
@property
def inverse(self):
return self.__class__(self.transform_2.inverse,
self.transform_1.inverse)
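# --- Illustrative sketch (not part of astropy): composition order -----------
# ``a + b`` builds CompositeTransform(b, a), i.e. ``b`` runs first and ``a``
# second, mirroring function composition a(b(x)). The toy transforms below
# are hypothetical and exist only for this demo.
if __name__ == '__main__':
    class _AddOne(BaseTransform):
        def __call__(self, values, clip=True):
            return values + 1
    class _Double(BaseTransform):
        def __call__(self, values, clip=True):
            return values * 2
    combo = _AddOne() + _Double()   # CompositeTransform(_Double(), _AddOne())
    print(combo(3))                 # _Double runs first: 3 * 2 + 1 -> 7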
|
4be730304286e3306a020d0e8615b2d884cdf699b71429b7a10fb4c682002411 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes that deal with stretching, i.e. mapping a range of [0:1] values onto
another set of [0:1] values with a transformation
"""
import numpy as np
from astropy.utils.misc import InheritDocstrings
from .transform import BaseTransform
from .transform import CompositeTransform
__all__ = ["BaseStretch", "LinearStretch", "SqrtStretch", "PowerStretch",
"PowerDistStretch", "SquaredStretch", "LogStretch", "AsinhStretch",
"SinhStretch", "HistEqStretch", "ContrastBiasStretch",
"CompositeStretch"]
def _logn(n, x, out=None):
"""Calculate the log base n of x."""
# We define this because numpy.lib.scimath.logn doesn't support out=
if out is None:
return np.log(x) / np.log(n)
else:
np.log(x, out=out)
np.true_divide(out, np.log(n), out=out)
return out
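# A quick sanity check (illustrative, not part of astropy): for n = 10 and
# x = 100 both branches return 2, since log(100)/log(10) == 2. The in-place
# branch writes np.log(x) into ``out`` first and then divides by log(n),
# which avoids allocating a temporary array.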
def _prepare(values, clip=True, out=None):
"""
Prepare the data by optionally clipping and copying, and return the
array that should be subsequently used for in-place calculations.
"""
if clip:
return np.clip(values, 0., 1., out=out)
else:
if out is None:
return np.array(values, copy=True)
else:
out[:] = np.asarray(values)
return out
class BaseStretch(BaseTransform, metaclass=InheritDocstrings):
"""
Base class for the stretch classes, which, when called with an array
    of values in the range [0:1], return a transformed array of values,
also in the range [0:1].
"""
def __add__(self, other):
return CompositeStretch(other, self)
def __call__(self, values, clip=True, out=None):
"""
Transform values using this stretch.
Parameters
----------
values : array-like
The input values, which should already be normalized to the
[0:1] range.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : `~numpy.ndarray`, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
Returns
-------
result : `~numpy.ndarray`
The transformed values.
"""
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
class LinearStretch(BaseStretch):
"""
A linear stretch with a slope and offset.
The stretch is given by:
.. math::
        y = {\rm slope} \, x + {\rm intercept}
Parameters
----------
slope : float, optional
The ``slope`` parameter used in the above formula. Default is 1.
intercept : float, optional
The ``intercept`` parameter used in the above formula. Default is 0.
"""
def __init__(self, slope=1, intercept=0):
super().__init__()
self.slope = slope
self.intercept = intercept
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
if self.slope != 1:
np.multiply(values, self.slope, out=values)
if self.intercept != 0:
np.add(values, self.intercept, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return LinearStretch(1. / self.slope, - self.intercept / self.slope)
class SqrtStretch(BaseStretch):
r"""
A square root stretch.
The stretch is given by:
.. math::
y = \sqrt{x}
"""
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
with np.errstate(invalid='ignore'):
np.sqrt(values, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return PowerStretch(2)
class PowerStretch(BaseStretch):
r"""
A power stretch.
The stretch is given by:
.. math::
y = x^a
Parameters
----------
a : float
The power index (see the above formula).
"""
def __init__(self, a):
super().__init__()
self.power = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.power(values, self.power, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return PowerStretch(1. / self.power)
class PowerDistStretch(BaseStretch):
r"""
An alternative power stretch.
The stretch is given by:
.. math::
y = \frac{a^x - 1}{a - 1}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. Default is 1000.
``a`` cannot be set to 1.
"""
def __init__(self, a=1000.0):
if a == 1: # singularity
raise ValueError("a cannot be set to 1")
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.power(self.exp, values, out=values)
np.subtract(values, 1, out=values)
np.true_divide(values, self.exp - 1.0, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedPowerDistStretch(a=self.exp)
class InvertedPowerDistStretch(BaseStretch):
r"""
Inverse transformation for
    `~astropy.visualization.PowerDistStretch`.
The stretch is given by:
.. math::
        y = \frac{\log{(x (a - 1) + 1)}}{\log{a}}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. Default is 1000.
``a`` cannot be set to 1.
"""
def __init__(self, a=1000.0):
if a == 1: # singularity
raise ValueError("a cannot be set to 1")
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.multiply(values, self.exp - 1.0, out=values)
np.add(values, 1, out=values)
_logn(self.exp, values, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return PowerDistStretch(a=self.exp)
class SquaredStretch(PowerStretch):
r"""
A convenience class for a power stretch of 2.
The stretch is given by:
.. math::
y = x^2
"""
def __init__(self):
super().__init__(2)
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return SqrtStretch()
class LogStretch(BaseStretch):
r"""
A log stretch.
The stretch is given by:
.. math::
y = \frac{\log{(a x + 1)}}{\log{(a + 1)}}.
Parameters
----------
    a : float, optional
The ``a`` parameter used in the above formula. Default is 1000.
"""
def __init__(self, a=1000.0):
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.multiply(values, self.exp, out=values)
np.add(values, 1., out=values)
np.log(values, out=values)
np.true_divide(values, np.log(self.exp + 1.), out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedLogStretch(self.exp)
class InvertedLogStretch(BaseStretch):
r"""
    Inverse transformation for `~astropy.visualization.LogStretch`.
The stretch is given by:
.. math::
        y = \frac{e^{x \log{(a + 1)}} - 1}{a} = \frac{(a + 1)^x - 1}{a}
Parameters
----------
    a : float
        The ``a`` parameter used in the above formula.
"""
def __init__(self, a):
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.multiply(values, np.log(self.exp + 1.), out=values)
np.exp(values, out=values)
np.subtract(values, 1., out=values)
np.true_divide(values, self.exp, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return LogStretch(self.exp)
class AsinhStretch(BaseStretch):
r"""
An asinh stretch.
The stretch is given by:
.. math::
y = \frac{{\rm asinh}(x / a)}{{\rm asinh}(1 / a)}.
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. The value of
this parameter is where the asinh curve transitions from linear
to logarithmic behavior, expressed as a fraction of the
normalized image. Must be in the range between 0 and 1.
Default is 0.1
"""
def __init__(self, a=0.1):
super().__init__()
self.a = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.true_divide(values, self.a, out=values)
np.arcsinh(values, out=values)
np.true_divide(values, np.arcsinh(1. / self.a), out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return SinhStretch(a=1. / np.arcsinh(1. / self.a))
class SinhStretch(BaseStretch):
r"""
A sinh stretch.
The stretch is given by:
.. math::
y = \frac{{\rm sinh}(x / a)}{{\rm sinh}(1 / a)}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. Default is 1/3.
"""
def __init__(self, a=1./3.):
super().__init__()
self.a = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
np.true_divide(values, self.a, out=values)
np.sinh(values, out=values)
np.true_divide(values, np.sinh(1. / self.a), out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return AsinhStretch(a=1. / np.sinh(1. / self.a))
class HistEqStretch(BaseStretch):
"""
A histogram equalization stretch.
Parameters
----------
data : array-like
The data defining the equalization.
values : array-like, optional
The input image values, which should already be normalized to
the [0:1] range.
"""
def __init__(self, data, values=None):
# Assume data is not necessarily normalized at this point
self.data = np.sort(data.ravel())
vmin = self.data.min()
vmax = self.data.max()
self.data = (self.data - vmin) / (vmax - vmin)
# Compute relative position of each pixel
if values is None:
self.values = np.linspace(0., 1., len(self.data))
else:
self.values = values
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
values[:] = np.interp(values, self.data, self.values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedHistEqStretch(self.data, values=self.values)
class InvertedHistEqStretch(BaseStretch):
"""
    Inverse transformation for `~astropy.visualization.HistEqStretch`.
Parameters
----------
data : array-like
The data defining the equalization.
values : array-like, optional
The input image values, which should already be normalized to
the [0:1] range.
"""
def __init__(self, data, values=None):
self.data = data
if values is None:
self.values = np.linspace(0., 1., len(self.data))
else:
self.values = values
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
values[:] = np.interp(values, self.values, self.data)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return HistEqStretch(self.data, values=self.values)
class ContrastBiasStretch(BaseStretch):
r"""
A stretch that takes into account contrast and bias.
The stretch is given by:
.. math::
y = (x - {\rm bias}) * {\rm contrast} + 0.5
and the output values are clipped to the [0:1] range.
Parameters
----------
contrast : float
The contrast parameter (see the above formula).
bias : float
The bias parameter (see the above formula).
"""
def __init__(self, contrast, bias):
super().__init__()
self.contrast = contrast
self.bias = bias
def __call__(self, values, clip=True, out=None):
# As a special case here, we only clip *after* the
# transformation since it does not map [0:1] to [0:1]
values = _prepare(values, clip=False, out=out)
np.subtract(values, self.bias, out=values)
np.multiply(values, self.contrast, out=values)
np.add(values, 0.5, out=values)
if clip:
np.clip(values, 0, 1, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedContrastBiasStretch(self.contrast, self.bias)
class InvertedContrastBiasStretch(BaseStretch):
"""
Inverse transformation for ContrastBiasStretch.
Parameters
----------
contrast : float
The contrast parameter (see
        `~astropy.visualization.ContrastBiasStretch`).
bias : float
The bias parameter (see
        `~astropy.visualization.ContrastBiasStretch`).
"""
def __init__(self, contrast, bias):
super().__init__()
self.contrast = contrast
self.bias = bias
def __call__(self, values, clip=True, out=None):
# As a special case here, we only clip *after* the
# transformation since it does not map [0:1] to [0:1]
values = _prepare(values, clip=False, out=out)
np.subtract(values, 0.5, out=values)
np.true_divide(values, self.contrast, out=values)
np.add(values, self.bias, out=values)
if clip:
np.clip(values, 0, 1, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return ContrastBiasStretch(self.contrast, self.bias)
class CompositeStretch(CompositeTransform, BaseStretch):
"""
A combination of two stretches.
Parameters
----------
stretch_1 : :class:`astropy.visualization.BaseStretch`
The first stretch to apply.
stretch_2 : :class:`astropy.visualization.BaseStretch`
The second stretch to apply.
"""
def __call__(self, values, clip=True, out=None):
return self.transform_2(
self.transform_1(values, clip=clip, out=out), clip=clip, out=out)
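# --- Illustrative usage sketch (not part of astropy) ------------------------
# Stretches map [0:1] onto [0:1]; ``inverse`` round-trips, and ``a + b``
# applies ``b`` first, then ``a`` (see BaseTransform.__add__). Run via
# ``python -m`` since this module uses relative imports.
if __name__ == '__main__':
    x = np.linspace(0., 1., 5)
    stretch = SqrtStretch()
    y = stretch(x)
    print(np.allclose(stretch.inverse(y), x))  # PowerStretch(2) undoes sqrt
    combo = SqrtStretch() + LogStretch()       # LogStretch first, then sqrt
    print(combo(x))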
|
f0d4f89ccd72f38dd891ccf0cea033108880243345ec952fc1b909416e30b3d7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
from inspect import signature
from itertools import islice
import warnings
from functools import wraps
from astropy.utils.exceptions import AstropyUserWarning
from .nddata import NDData
__all__ = ['support_nddata']
# All supported properties are optional except "data" which is mandatory!
SUPPORTED_PROPERTIES = ['data', 'uncertainty', 'mask', 'meta', 'unit', 'wcs',
'flags']
def support_nddata(_func=None, accepts=NDData,
repack=False, returns=None, keeps=None,
**attribute_argument_mapping):
"""Decorator to wrap functions that could accept an NDData instance with
its properties passed as function arguments.
Parameters
----------
_func : callable, None, optional
The function to decorate or ``None`` if used as factory. The first
positional argument should be ``data`` and take a numpy array. It is
possible to overwrite the name, see ``attribute_argument_mapping``
argument.
Default is ``None``.
accepts : cls, optional
The class or subclass of ``NDData`` that should be unpacked before
calling the function.
Default is ``NDData``
repack : bool, optional
Should be ``True`` if the return should be converted to the input
class again after the wrapped function call.
Default is ``False``.
.. note::
Must be ``True`` if either one of ``returns`` or ``keeps``
is specified.
returns : iterable, None, optional
        An iterable containing strings that name, in order, the attributes
        on the class that the returned values should be set as. For example,
        if a function returns data and mask, this should be
        ``['data', 'mask']``. If ``None``, assume the function only returns
        one value: ``'data'``.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
    keeps : iterable, None, optional
An iterable containing strings that indicate which values should be
copied from the original input to the returned class. If ``None``
assume that no attributes are copied.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
attribute_argument_mapping :
Keyword parameters that optionally indicate which function argument
should be interpreted as which attribute on the input. By default
it assumes the function takes a ``data`` argument as first argument,
but if the first argument is called ``input`` one should pass
``support_nddata(..., data='input')`` to the function.
Returns
-------
decorator_factory or decorated_function : callable
If ``_func=None`` this returns a decorator, otherwise it returns the
decorated ``_func``.
Notes
-----
    If properties of the ``NDData`` instance are set but the function has no
    corresponding argument, a Warning is shown.
    If a property of the ``NDData`` instance is set and an explicit argument
    is also given, the explicitly given argument is used and a Warning is
    shown.
The supported properties are:
- ``mask``
- ``unit``
- ``wcs``
- ``meta``
- ``uncertainty``
- ``flags``
Examples
--------
This function takes a Numpy array for the data, and some WCS information
with the ``wcs`` keyword argument::
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
However, you might have an NDData instance that has the ``wcs`` property
set and you would like to be able to call the function with
``downsample(my_nddata)`` and have the WCS information, if present,
automatically be passed to the ``wcs`` keyword argument.
This decorator can be used to make this possible::
@support_nddata
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
This function can now either be called as before, specifying the data and
WCS separately, or an NDData instance can be passed to the ``data``
argument.
"""
if (returns is not None or keeps is not None) and not repack:
raise ValueError('returns or keeps should only be set if repack=True.')
elif returns is None and repack:
raise ValueError('returns should be set if repack=True.')
else:
# Use empty lists for returns and keeps so we don't need to check
# if any of those is None later on.
if returns is None:
returns = []
if keeps is None:
keeps = []
# Short version to avoid the long variable name later.
attr_arg_map = attribute_argument_mapping
if any(keep in returns for keep in keeps):
raise ValueError("cannot specify the same attribute in `returns` and "
"`keeps`.")
all_returns = returns + keeps
def support_nddata_decorator(func):
# Find out args and kwargs
func_args, func_kwargs = [], []
sig = signature(func).parameters
for param_name, param in sig.items():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("func may not have *args or **kwargs.")
try:
if param.default == param.empty:
func_args.append(param_name)
else:
func_kwargs.append(param_name)
# The comparison to param.empty may fail if the default is a
# numpy array or something similar. So if the comparison fails then
# it's quite obvious that there was a default and it should be
# appended to the "func_kwargs".
except ValueError as exc:
if ('The truth value of an array with more than one element '
'is ambiguous.') in str(exc):
func_kwargs.append(param_name)
else:
raise
# First argument should be data
if not func_args or func_args[0] != attr_arg_map.get('data', 'data'):
raise ValueError("Can only wrap functions whose first positional "
"argument is `{0}`"
"".format(attr_arg_map.get('data', 'data')))
@wraps(func)
def wrapper(data, *args, **kwargs):
bound_args = signature(func).bind(data, *args, **kwargs)
unpack = isinstance(data, accepts)
input_data = data
ignored = []
if not unpack and isinstance(data, NDData):
raise TypeError("Only NDData sub-classes that inherit from {0}"
" can be used by this function"
"".format(accepts.__name__))
# If data is an NDData instance, we can try and find properties
# that can be passed as kwargs.
if unpack:
# We loop over a list of pre-defined properties
for prop in islice(SUPPORTED_PROPERTIES, 1, None):
# We only need to do something if the property exists on
# the NDData object
try:
value = getattr(data, prop)
except AttributeError:
continue
# Skip if the property exists but is None or empty.
if prop == 'meta' and not value:
continue
elif value is None:
continue
# Warn if the property is set but not used by the function.
propmatch = attr_arg_map.get(prop, prop)
if propmatch not in func_kwargs:
ignored.append(prop)
continue
# Check if the property was explicitly given and issue a
# Warning if it is.
if propmatch in bound_args.arguments:
# If it's in the func_args it's trivial but if it was
# in the func_kwargs we need to compare it to the
# default.
# Comparison to the default is done by comparing their
# identity, this works because defaults in function
# signatures are only created once and always reference
# the same item.
# FIXME: Python interns some values, for example the
                        # integers from -5 to 255 (and maybe some other types
# as well). In that case the default is
# indistinguishable from an explicitly passed kwarg
# and it won't notice that and use the attribute of the
# NDData.
if (propmatch in func_args or
(propmatch in func_kwargs and
(bound_args.arguments[propmatch] is not
sig[propmatch].default))):
warnings.warn(
"Property {0} has been passed explicitly and "
"as an NDData property{1}, using explicitly "
"specified value"
"".format(propmatch, '' if prop == propmatch
else ' ' + prop),
AstropyUserWarning)
continue
# Otherwise use the property as input for the function.
kwargs[propmatch] = value
# Finally, replace data by the data attribute
data = data.data
if ignored:
warnings.warn("The following attributes were set on the "
"data object, but will be ignored by the "
"function: " + ", ".join(ignored),
AstropyUserWarning)
result = func(data, *args, **kwargs)
if unpack and repack:
# If there are multiple required returned arguments make sure
# the result is a tuple (because we don't want to unpack
# numpy arrays or compare their length, never!) and has the
# same length.
                if len(returns) > 1:
                    if (not isinstance(result, tuple) or
                            len(returns) != len(result)):
                        raise ValueError("Function did not return the "
                                         "expected number of arguments.")
                    # Convert to a list so the kept attributes can be
                    # appended below (a tuple is immutable).
                    result = list(result)
elif len(returns) == 1:
result = [result]
if keeps is not None:
for keep in keeps:
result.append(deepcopy(getattr(input_data, keep)))
resultdata = result[all_returns.index('data')]
resultkwargs = {ret: res
for ret, res in zip(all_returns, result)
if ret != 'data'}
return input_data.__class__(resultdata, **resultkwargs)
else:
return result
return wrapper
# If _func is set, this means that the decorator was used without
# parameters so we have to return the result of the
# support_nddata_decorator decorator rather than the decorator itself
if _func is not None:
return support_nddata_decorator(_func)
else:
return support_nddata_decorator
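# --- Illustrative usage sketch (not part of astropy) ------------------------
# The decorated function accepts either a plain array plus keyword arguments
# or an NDData instance whose attributes are unpacked for it. Run this demo
# via ``python -m`` since the module uses relative imports.
if __name__ == '__main__':
    import numpy as np
    @support_nddata
    def masked_total(data, mask=None):
        # hypothetical helper: sum of the unmasked values
        if mask is not None:
            data = data[~mask]
        return data.sum()
    ndd = NDData(np.arange(5),
                 mask=np.array([False, False, True, False, False]))
    print(masked_total(ndd))        # mask unpacked automatically -> 8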
|
bba04011a1785eabce2e3c50b300bf80638d11a447a06f8e3c4d13def0130175 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDDataBase class.
from abc import ABCMeta, abstractmethod
__all__ = ['NDDataBase']
class NDDataBase(metaclass=ABCMeta):
"""Base metaclass that defines the interface for N-dimensional datasets
with associated meta information used in ``astropy``.
All properties and ``__init__`` have to be overridden in subclasses. See
`NDData` for a subclass that defines this interface on `numpy.ndarray`-like
``data``.
See also: http://docs.astropy.org/en/stable/nddata/
"""
@abstractmethod
def __init__(self):
pass
@property
@abstractmethod
def data(self):
"""The stored dataset.
"""
pass
@property
@abstractmethod
def mask(self):
"""Mask for the dataset.
Masks should follow the ``numpy`` convention that **valid** data points
are marked by ``False`` and **invalid** ones with ``True``.
"""
return None
@property
@abstractmethod
def unit(self):
"""Unit for the dataset.
"""
return None
@property
@abstractmethod
def wcs(self):
"""World coordinate system (WCS) for the dataset.
"""
return None
@property
@abstractmethod
def meta(self):
"""Additional meta information about the dataset.
Should be `dict`-like.
"""
return None
@property
@abstractmethod
def uncertainty(self):
"""Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``"std"`` for standard deviation or
``"var"`` for variance.
"""
return None
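# --- Illustrative sketch (not part of astropy): minimal concrete subclass ---
# Every abstract property (and __init__) must be overridden before the class
# can be instantiated.
if __name__ == '__main__':
    class MinimalNDData(NDDataBase):
        """Hypothetical bare-bones implementation for demonstration."""
        def __init__(self, data):
            self._data = data
        @property
        def data(self):
            return self._data
        @property
        def mask(self):
            return None
        @property
        def unit(self):
            return None
        @property
        def wcs(self):
            return None
        @property
        def meta(self):
            return {}
        @property
        def uncertainty(self):
            return None
    print(MinimalNDData([1, 2, 3]).data)   # -> [1, 2, 3]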
|
28e2837dd98e3383ec4126b02da4d1a6e3800de203fa75089d74e89fad481c53 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .nddata_base import NDDataBase
import numpy as np
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import weakref
# from astropy.utils.compat import ignored
from astropy import log
from astropy.units import Unit, Quantity, UnitConversionError
__all__ = ['MissingDataAssociationException',
'IncompatibleUncertaintiesException', 'NDUncertainty',
'StdDevUncertainty', 'UnknownUncertainty',
'VarianceUncertainty', 'InverseVariance']
class IncompatibleUncertaintiesException(Exception):
"""This exception should be used to indicate cases in which uncertainties
with two different classes can not be propagated.
"""
class MissingDataAssociationException(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
class NDUncertainty(metaclass=ABCMeta):
"""This is the metaclass for uncertainty classes used with `NDData`.
Parameters
----------
array : any type, optional
The array or value (the parameter name is due to historical reasons) of
the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
`NDUncertainty` subclasses are recommended.
If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
to a plain `numpy.ndarray`.
Default is ``None``.
unit : `~astropy.units.Unit` or str, optional
Unit for the uncertainty ``array``. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the `array` as a copy. ``True`` copies it
before saving, while ``False`` tries to save every parameter as
reference. Note however that it is not always possible to save the
input as reference.
Default is ``True``.
Raises
------
IncompatibleUncertaintiesException
        If given another `NDUncertainty`-like class as ``array`` whose
        ``uncertainty_type`` is different.
"""
def __init__(self, array=None, copy=True, unit=None):
if isinstance(array, NDUncertainty):
# Given an NDUncertainty class or subclass check that the type
# is the same.
if array.uncertainty_type != self.uncertainty_type:
raise IncompatibleUncertaintiesException
# Check if two units are given and take the explicit one then.
if (unit is not None and unit != array._unit):
# TODO : Clarify it (see NDData.init for same problem)?
log.info("overwriting Uncertainty's current "
"unit with specified unit.")
elif array._unit is not None:
unit = array.unit
array = array.array
elif isinstance(array, Quantity):
# Check if two units are given and take the explicit one then.
if (unit is not None and array.unit is not None and
unit != array.unit):
log.info("overwriting Quantity's current "
"unit with specified unit.")
elif array.unit is not None:
unit = array.unit
array = array.value
if unit is None:
self._unit = None
else:
self._unit = Unit(unit)
if copy:
array = deepcopy(array)
unit = deepcopy(unit)
self.array = array
self.parent_nddata = None # no associated NDData - until it is set!
@property
@abstractmethod
def uncertainty_type(self):
"""`str` : Short description of the type of uncertainty.
Defined as abstract property so subclasses *have* to override this.
"""
return None
@property
def supports_correlated(self):
"""`bool` : Supports uncertainty propagation with correlated \
uncertainties?
.. versionadded:: 1.2
"""
return False
@property
def array(self):
"""`numpy.ndarray` : the uncertainty's value.
"""
return self._array
@array.setter
def array(self, value):
if isinstance(value, (list, np.ndarray)):
value = np.array(value, subok=False, copy=False)
self._array = value
@property
def unit(self):
"""`~astropy.units.Unit` : The unit of the uncertainty, if any.
"""
return self._unit
@unit.setter
def unit(self, value):
"""
The unit should be set to a value consistent with the parent NDData
unit and the uncertainty type.
"""
if value is not None:
# Check the hidden attribute below, not the property. The property
# raises an exception if there is no parent_nddata.
if self._parent_nddata is not None:
parent_unit = self.parent_nddata.unit
try:
# Check for consistency with the unit of the parent_nddata
self._data_unit_to_uncertainty_unit(parent_unit).to(value)
except UnitConversionError:
raise UnitConversionError("Unit {} is incompatible "
"with unit {} of parent "
"nddata".format(value,
parent_unit))
self._unit = Unit(value)
else:
self._unit = value
@property
def quantity(self):
"""
This uncertainty as an `~astropy.units.Quantity` object.
"""
return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype)
@property
def parent_nddata(self):
"""`NDData` : reference to `NDData` instance with this uncertainty.
In case the reference is not set uncertainty propagation will not be
possible since propagation might need the uncertain data besides the
uncertainty.
"""
no_parent_message = "uncertainty is not associated with an NDData object"
parent_lost_message = (
"the associated NDData object was deleted and cannot be accessed "
"anymore. You can prevent the NDData object from being deleted by "
"assigning it to a variable. If this happened after unpickling "
"make sure you pickle the parent not the uncertainty directly."
)
try:
parent = self._parent_nddata
except AttributeError:
raise MissingDataAssociationException(no_parent_message)
else:
if parent is None:
raise MissingDataAssociationException(no_parent_message)
else:
# The NDData is saved as weak reference so we must call it
# to get the object the reference points to. However because
# we have a weak reference here it's possible that the parent
# was deleted because its reference count dropped to zero.
if isinstance(self._parent_nddata, weakref.ref):
resolved_parent = self._parent_nddata()
if resolved_parent is None:
log.info(parent_lost_message)
return resolved_parent
else:
log.info("parent_nddata should be a weakref to an NDData "
"object.")
return self._parent_nddata
@parent_nddata.setter
def parent_nddata(self, value):
if value is not None and not isinstance(value, weakref.ref):
# Save a weak reference on the uncertainty that points to this
# instance of NDData. Direct references should NOT be used:
# https://github.com/astropy/astropy/pull/4799#discussion_r61236832
value = weakref.ref(value)
# Set _parent_nddata here and access below with the property because value
# is a weakref
self._parent_nddata = value
        # Set the uncertainty's unit to that of the parent if it was not
        # already set, unless initializing with an empty parent (value=None).
if value is not None:
parent_unit = self.parent_nddata.unit
if self.unit is None:
if parent_unit is None:
self.unit = None
else:
# Set the uncertainty's unit to the appropriate value
self.unit = self._data_unit_to_uncertainty_unit(parent_unit)
else:
# Check that units of uncertainty are compatible with those of
# the parent. If they are, no need to change units of the
# uncertainty or the data. If they are not, let the user know.
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit)
try:
unit_from_data.to(self.unit)
except UnitConversionError:
raise UnitConversionError("Unit {} of uncertainty "
"incompatible with unit {} of "
"data".format(self.unit,
parent_unit))
@abstractmethod
def _data_unit_to_uncertainty_unit(self, value):
"""
        Subclasses must override this method. It should take in a data unit
and return the correct unit for the uncertainty given the uncertainty
type.
"""
return None
def __repr__(self):
prefix = self.__class__.__name__ + '('
try:
body = np.array2string(self.array, separator=', ', prefix=prefix)
except AttributeError:
# In case it wasn't possible to use array2string
body = str(self.array)
return ''.join([prefix, body, ')'])
def __getstate__(self):
# Because of the weak reference the class wouldn't be picklable.
try:
return self._array, self._unit, self.parent_nddata
except MissingDataAssociationException:
# In case there's no parent
return self._array, self._unit, None
def __setstate__(self, state):
if len(state) != 3:
raise TypeError('The state should contain 3 items.')
self._array = state[0]
self._unit = state[1]
parent = state[2]
if parent is not None:
parent = weakref.ref(parent)
self._parent_nddata = parent
def __getitem__(self, item):
"""Normal slicing on the array, keep the unit and return a reference.
"""
return self.__class__(self.array[item], unit=self.unit, copy=False)
def propagate(self, operation, other_nddata, result_data, correlation):
"""Calculate the resulting uncertainty given an operation on the data.
.. versionadded:: 1.2
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide` (or `numpy.divide`).
other_nddata : `NDData` instance
The second operand in the arithmetic operation.
result_data : `~astropy.units.Quantity` or `numpy.ndarray`
The result of the arithmetic operations on the data.
correlation : `numpy.ndarray` or number
The correlation (rho) is defined between the uncertainties in
sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
uncorrelated operands.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
Another instance of the same `NDUncertainty` subclass containing
the uncertainty of the result.
Raises
------
ValueError
If the ``operation`` is not supported or if correlation is not zero
but the subclass does not support correlated uncertainties.
Notes
-----
First this method checks if a correlation is given and the subclass
implements propagation with correlated uncertainties.
Then the second uncertainty is converted (or an Exception is raised)
to the same class in order to do the propagation.
Then the appropriate propagation method is invoked and the result is
returned.
"""
# Check if the subclass supports correlation
if not self.supports_correlated:
if isinstance(correlation, np.ndarray) or correlation != 0:
raise ValueError("{0} does not support uncertainty propagation"
" with correlation."
"".format(self.__class__.__name__))
# Get the other uncertainty (and convert it to a matching one)
other_uncert = self._convert_uncertainty(other_nddata.uncertainty)
if operation.__name__ == 'add':
result = self._propagate_add(other_uncert, result_data,
correlation)
elif operation.__name__ == 'subtract':
result = self._propagate_subtract(other_uncert, result_data,
correlation)
elif operation.__name__ == 'multiply':
result = self._propagate_multiply(other_uncert, result_data,
correlation)
elif operation.__name__ in ['true_divide', 'divide']:
result = self._propagate_divide(other_uncert, result_data,
correlation)
else:
raise ValueError('unsupported operation')
return self.__class__(result, copy=False)
def _convert_uncertainty(self, other_uncert):
"""Checks if the uncertainties are compatible for propagation.
        Checks if the other uncertainty is `NDUncertainty`-like and, if so,
        verifies that the ``uncertainty_type`` is equal. If the latter is not
        the case, tries returning ``self.__class__(other_uncert)``.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The other uncertainty.
Returns
-------
other_uncert : `NDUncertainty` subclass
but converted to a compatible `NDUncertainty` subclass if
possible and necessary.
Raises
------
IncompatibleUncertaintiesException:
If the other uncertainty cannot be converted to a compatible
`NDUncertainty` subclass.
"""
if isinstance(other_uncert, NDUncertainty):
if self.uncertainty_type == other_uncert.uncertainty_type:
return other_uncert
else:
return self.__class__(other_uncert)
else:
raise IncompatibleUncertaintiesException
@abstractmethod
def _propagate_add(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class UnknownUncertainty(NDUncertainty):
"""This class implements any unknown uncertainty type.
The main purpose of having an unknown uncertainty class is to prevent
uncertainty propagation.
Parameters
----------
args, kwargs :
see `NDUncertainty`
"""
@property
def supports_correlated(self):
"""`False` : Uncertainty propagation is *not* possible for this class.
"""
return False
@property
def uncertainty_type(self):
"""``"unknown"`` : `UnknownUncertainty` implements any unknown \
uncertainty type.
"""
return 'unknown'
def _data_unit_to_uncertainty_unit(self, value):
"""
No way to convert if uncertainty is unknown.
"""
return None
def _convert_uncertainty(self, other_uncert):
"""Raise an Exception because unknown uncertainty types cannot
implement propagation.
"""
msg = "Uncertainties of unknown type cannot be propagated."
raise IncompatibleUncertaintiesException(msg)
def _propagate_add(self, other_uncert, result_data, correlation):
"""Not possible for unknown uncertainty types.
"""
return None
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class _VariancePropagationMixin:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_add_sub(self, other_uncert, result_data, correlation,
subtract=False,
to_variance=lambda x: x, from_variance=lambda x: x):
"""
Error propagation for addition or subtraction of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or `numpy.ndarray`-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit ** 2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if (other_uncert.unit is not None and
result_unit_sq != to_variance(other_uncert.unit)):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result convert it to the results unit
other = to_variance(other_uncert.array *
other_uncert.unit).to(result_unit_sq).value
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if self.unit is not None and to_variance(self.unit) != self.parent_nddata.unit**2:
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = to_variance(self.array * self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
# Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
# Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB)
# (sign depends on whether addition or subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
def _propagate_multiply_divide(self, other_uncert, result_data,
correlation,
divide=False,
to_variance=lambda x: x,
from_variance=lambda x: x):
"""
Error propagation for multiplication or division of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or `numpy.ndarray`-like
Correlation of the uncertainties.
divide : bool, optional
If ``True``, propagate for division, otherwise propagate for
multiplication.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
# For multiplication we don't need the result as quantity
if isinstance(result_data, Quantity):
result_data = result_data.value
if divide:
correlation_sign = -1
else:
correlation_sign = 1
if other_uncert.array is not None:
# We want the result to have a unit consistent with the parent, so
# we only need to convert the unit of the other uncertainty if it
# is different from its data's unit.
if (other_uncert.unit and
to_variance(1 * other_uncert.unit) !=
((1 * other_uncert.parent_nddata.unit)**2).unit):
d_b = to_variance((other_uncert.array * other_uncert.unit)).to(
(1 * other_uncert.parent_nddata.unit)**2).value
else:
d_b = to_variance(other_uncert.array)
# Formula: sigma**2 = |A|**2 * d_b
right = np.abs(self.parent_nddata.data**2 * d_b)
else:
right = 0
if self.array is not None:
# Just the reversed case
if (self.unit and
to_variance(1 * self.unit) !=
((1 * self.parent_nddata.unit)**2).unit):
d_a = to_variance(self.array * self.unit).to(
(1 * self.parent_nddata.unit)**2).value
else:
d_a = to_variance(self.array)
# Formula: sigma**2 = |B|**2 * d_a
left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
else:
left = 0
# Multiplication
#
# The fundamental formula is:
# sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# This formula is not very handy since it generates NaNs for every
# zero in A and B. So we rewrite it:
#
# Multiplication Formula:
        #   sigma**2 = d_a*B**2 + d_b*A**2 + 2 * cor * A * B * sqrt(d_a*d_b)
        #            = left + right + 2 * cor * A * B * sqrt(d_a*d_b)
#
# Division
#
# The fundamental formula for division is:
# sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# As with multiplication, it is convenient to rewrite this to avoid
# nans where A is zero.
#
# Division formula (rewritten):
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #                 - 2 * cor * A * sqrt(d_a) * sqrt(d_b) / B**3
        #            = d_a/B**2 + (A/B)**2 * d_b/B**2
        #                 - 2 * cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
# sigma**2 = multiplication formula/B**4 (and sign change in
# the correlation)
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = (2 * correlation * np.sqrt(d_a * d_b) *
self.parent_nddata.data *
other_uncert.parent_nddata.data)
else:
corr = 0
if divide:
return from_variance((left + right + correlation_sign * corr) /
other_uncert.parent_nddata.data**4)
else:
return from_variance(left + right + correlation_sign * corr)
class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
"""Standard deviation uncertainty assuming first order gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `StdDevUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parent's `NDData` unit.
The unit of the resulting uncertainty will have the same unit as the
resulting data. Also support for correlation is possible but requires the
correlation as input. It cannot handle correlation determination itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
`StdDevUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, StdDevUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.1, 0.1, 0.1])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.2])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 2
>>> ndd.uncertainty
StdDevUncertainty(2)
.. note::
The unit will not be displayed.
"""
@property
def supports_correlated(self):
"""`True` : `StdDevUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
@property
def uncertainty_type(self):
"""``"std"`` : `StdDevUncertainty` implements standard deviation.
"""
return 'std'
def _convert_uncertainty(self, other_uncert):
if isinstance(other_uncert, StdDevUncertainty):
return other_uncert
else:
raise IncompatibleUncertaintiesException
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=False,
to_variance=np.square,
from_variance=np.sqrt)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=True,
to_variance=np.square,
from_variance=np.sqrt)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=False,
to_variance=np.square,
from_variance=np.sqrt)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=True,
to_variance=np.square,
from_variance=np.sqrt)
def _data_unit_to_uncertainty_unit(self, value):
return value
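# --- Illustrative sketch (not part of astropy) -------------------------------
# First-order propagation for independent operands A = 10 +/- 3 and
# B = 6 +/- 4: addition gives sigma = sqrt(3**2 + 4**2) = 5. Run this demo
# via ``python -m`` since the module uses relative imports.
if __name__ == '__main__':
    from astropy.nddata import NDData   # imported here to avoid a cycle
    a = NDData([10.], uncertainty=StdDevUncertainty([3.]))
    b = NDData([6.], uncertainty=StdDevUncertainty([4.]))
    result = a.uncertainty.propagate(np.add, b, np.array([16.]), 0)
    print(result)                       # StdDevUncertainty([5.])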
class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
"""
Variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `VarianceUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parent's `NDData` unit.
The unit of the resulting uncertainty will be the square of the unit of the
resulting data. Also support for correlation is possible but requires the
correlation as input. It cannot handle correlation determination itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`VarianceUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, VarianceUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.01, 0.01, 0.01])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.04])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 4
>>> ndd.uncertainty
VarianceUncertainty(4)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"var"`` : `VarianceUncertainty` implements variance.
"""
return 'var'
@property
    def supports_correlated(self):
        """`True` : `VarianceUncertainty` supports propagation of correlated \
                    uncertainties.
        ``correlation`` must be given; this class does not compute it by
        itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=False)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=True)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=False)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=True)
def _data_unit_to_uncertainty_unit(self, value):
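        # For variance the uncertainty unit is the square of the data unit.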
return value ** 2
def _inverse(x):
    """Just a simple multiplicative inverse for use in InverseVariance."""
return 1 / x
class InverseVariance(_VariancePropagationMixin, NDUncertainty):
"""
Inverse variance uncertainty assuming first order Gaussian error
propagation.
    This class implements uncertainty propagation for ``addition``,
    ``subtraction``, ``multiplication`` and ``division`` with other instances
    of `InverseVariance`. The class can handle cases where the uncertainty has
    a unit that differs from (but is convertible to) the unit of the parent
    `NDData`. The unit of the resulting uncertainty will be the inverse square
    of the unit of the resulting data. Correlated uncertainties are also
    supported, but the correlation must be supplied as input; the class
    cannot determine it itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`InverseVariance` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, InverseVariance
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=InverseVariance([100, 100, 100]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([100, 100, 100])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([25])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 0.25
>>> ndd.uncertainty
InverseVariance(0.25)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"ivar"`` : `InverseVariance` implements inverse variance.
"""
return 'ivar'
@property
    def supports_correlated(self):
        """`True` : `InverseVariance` supports propagation of correlated \
                    uncertainties.
        ``correlation`` must be given; this class does not compute it by
        itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=False,
to_variance=_inverse,
from_variance=_inverse)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=True,
to_variance=_inverse,
from_variance=_inverse)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=False,
to_variance=_inverse,
from_variance=_inverse)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=True,
to_variance=_inverse,
from_variance=_inverse)
def _data_unit_to_uncertainty_unit(self, value):
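        # For inverse variance the uncertainty unit is the inverse square of
        # the data unit.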
return 1 / value ** 2
|
0cc06f75e992dd3689ffab2395cbcdcc7d4f5a5bc496daa9eb0115c7362eba76 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import numpy as np
from astropy.utils.misc import isiterable
__all__ = ['FlagCollection']
class FlagCollection(OrderedDict):
"""
The purpose of this class is to provide a dictionary for
    storing arrays of flags for the `NDData` class. Flags should be
stored in Numpy arrays that have the same dimensions as the parent
data, so the `FlagCollection` class adds shape checking to an
ordered dictionary class.
The `FlagCollection` should be initialized like an
`~collections.OrderedDict`, but with the addition of a ``shape=``
keyword argument used to pass the NDData shape.
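    Examples
    --------
    A minimal illustration (the flag name ``'dq'`` is arbitrary)::
    >>> import numpy as np
    >>> from astropy.nddata import FlagCollection
    >>> flags = FlagCollection(shape=(2, 2))
    >>> flags['dq'] = np.zeros((2, 2), dtype=np.int16)
    >>> flags['dq'].shape
    (2, 2)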
"""
def __init__(self, *args, **kwargs):
if 'shape' in kwargs:
self.shape = kwargs.pop('shape')
if not isiterable(self.shape):
raise ValueError("FlagCollection shape should be "
"an iterable object")
else:
raise Exception("FlagCollection should be initialized with "
"the shape of the data")
OrderedDict.__init__(self, *args, **kwargs)
def __setitem__(self, item, value, **kwargs):
if isinstance(value, np.ndarray):
if value.shape == self.shape:
OrderedDict.__setitem__(self, item, value, **kwargs)
else:
raise ValueError("flags array shape {0} does not match data "
"shape {1}".format(value.shape, self.shape))
else:
raise TypeError("flags should be given as a Numpy array")
|
ec04fb156b4112892d6b2df1f89c7a5b01d304a856e83a092605feb5e8f42f5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
import numpy as np
from copy import deepcopy
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
from astropy import log
from astropy.units import Unit, Quantity
from astropy.utils.metadata import MetaData
__all__ = ['NDData']
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
"""
A container for `numpy.ndarray`-based datasets, using the
`~astropy.nddata.NDDataBase` interface.
The key distinction from raw `numpy.ndarray` is the presence of
additional metadata such as uncertainty, mask, unit, a coordinate system
and/or a dictionary containing further meta information. This class *only*
provides a container for *storing* such datasets. For further functionality
take a look at the ``See also`` section.
See also: http://docs.astropy.org/en/stable/nddata/
Parameters
    ----------
data : `numpy.ndarray`-like or `NDData`-like
The dataset.
uncertainty : any type, optional
Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, for example ``"std"`` for standard deviation or
``"var"`` for variance. A metaclass defining such an interface is
`NDUncertainty` - but isn't mandatory. If the uncertainty has no such
attribute the uncertainty is stored as `UnknownUncertainty`.
Defaults to ``None``.
mask : any type, optional
Mask for the dataset. Masks should follow the ``numpy`` convention that
**valid** data points are marked by ``False`` and **invalid** ones with
``True``.
Defaults to ``None``.
wcs : any type, optional
World coordinate system (WCS) for the dataset.
Default is ``None``.
meta : `dict`-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty `collections.OrderedDict` is created.
Default is ``None``.
unit : `~astropy.units.Unit`-like or str, optional
Unit for the dataset. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
        Indicates whether to save the arguments as copies. ``True`` copies
        every attribute before saving it while ``False`` tries to save every
        parameter as a reference.
        Note however that it is not always possible to save the input as a
        reference.
        Default is ``False``.
.. versionadded:: 1.2
Raises
------
TypeError
In case ``data`` or ``meta`` don't meet the restrictions.
Notes
-----
    Each attribute can be accessed through the homonymous instance attribute:
    ``data`` in an `NDData` object can be accessed through the `data`
    attribute::
>>> from astropy.nddata import NDData
>>> nd = NDData([1,2,3])
>>> nd.data
array([1, 2, 3])
    If an implicit and an explicit parameter conflict during initialization,
    for example when ``data`` is a `~astropy.units.Quantity` and the ``unit``
    parameter is not ``None``, then the implicit parameter is replaced
    (without conversion) by the explicit one and an informational message is
    issued::
>>> import numpy as np
>>> import astropy.units as u
>>> q = np.array([1,2,3,4]) * u.m
>>> nd2 = NDData(q, unit=u.cm)
INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
>>> nd2.data # doctest: +FLOAT_CMP
array([1., 2., 3., 4.])
>>> nd2.unit
Unit("cm")
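    Requesting a copy at initialization decouples the instance from the
    input array; a small illustration::
    >>> arr = np.array([1, 2, 3])
    >>> nd3 = NDData(arr, copy=True)
    >>> arr[0] = 10
    >>> nd3.data
    array([1, 2, 3])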
See also
--------
NDDataRef
NDDataArray
"""
# Instead of a custom property use the MetaData descriptor also used for
# Tables. It will check if the meta is dict-like or raise an exception.
meta = MetaData(doc=_meta_doc, copy=False)
def __init__(self, data, uncertainty=None, mask=None, wcs=None,
meta=None, unit=None, copy=False):
        # Rather pointless since NDDataBase does not implement any setting,
        # but an earlier version of NDDataBase did call the uncertainty
        # setter. If anyone wants to alter this behavior again, the call
        # to the superclass NDDataBase should be made here.
super().__init__()
# Check if data is any type from which to collect some implicitly
# passed parameters.
if isinstance(data, NDData): # don't use self.__class__ (issue #4137)
# Of course we need to check the data because subclasses with other
# init-logic might be passed in here. We could skip these
# tests if we compared for self.__class__ but that has other
# drawbacks.
# Comparing if there is an explicit and an implicit unit parameter.
# If that is the case use the explicit one and issue a warning
# that there might be a conflict. In case there is no explicit
# unit just overwrite the unit parameter with the NDData.unit
# and proceed as if that one was given as parameter. Same for the
# other parameters.
if (unit is not None and data.unit is not None and
unit != data.unit):
log.info("overwriting NDData's current "
"unit with specified unit.")
elif data.unit is not None:
unit = data.unit
if uncertainty is not None and data.uncertainty is not None:
log.info("overwriting NDData's current "
"uncertainty with specified uncertainty.")
elif data.uncertainty is not None:
uncertainty = data.uncertainty
if mask is not None and data.mask is not None:
log.info("overwriting NDData's current "
"mask with specified mask.")
elif data.mask is not None:
mask = data.mask
if wcs is not None and data.wcs is not None:
log.info("overwriting NDData's current "
"wcs with specified wcs.")
elif data.wcs is not None:
wcs = data.wcs
if meta is not None and data.meta is not None:
log.info("overwriting NDData's current "
"meta with specified meta.")
elif data.meta is not None:
meta = data.meta
data = data.data
else:
if hasattr(data, 'mask') and hasattr(data, 'data'):
# Separating data and mask
if mask is not None:
                    log.info("overwriting masked object's current "
                             "mask with specified mask.")
else:
mask = data.mask
# Just save the data for further processing, we could be given
# a masked Quantity or something else entirely. Better to check
# it first.
data = data.data
if isinstance(data, Quantity):
if unit is not None and unit != data.unit:
log.info("overwriting Quantity's current "
"unit with specified unit.")
else:
unit = data.unit
data = data.value
# Quick check on the parameters if they match the requirements.
if (not hasattr(data, 'shape') or not hasattr(data, '__getitem__') or
not hasattr(data, '__array__')):
# Data doesn't look like a numpy array, try converting it to
# one.
data = np.array(data, subok=True, copy=False)
            # Another quick check to see if what we got looks like an array
            # rather than an object (since numpy will convert
            # non-numerical/non-string input to an array of objects).
if data.dtype == 'O':
raise TypeError("could not convert data to numpy array.")
if unit is not None:
unit = Unit(unit)
if copy:
# Data might have been copied before but no way of validating
# without another variable.
data = deepcopy(data)
mask = deepcopy(mask)
wcs = deepcopy(wcs)
meta = deepcopy(meta)
uncertainty = deepcopy(uncertainty)
# Actually - copying the unit is unnecessary but better safe
# than sorry :-)
unit = deepcopy(unit)
# Store the attributes
self._data = data
self.mask = mask
self._wcs = wcs
self.meta = meta # TODO: Make this call the setter sometime
self._unit = unit
# Call the setter for uncertainty to further check the uncertainty
self.uncertainty = uncertainty
def __str__(self):
return str(self.data)
def __repr__(self):
prefix = self.__class__.__name__ + '('
body = np.array2string(self.data, separator=', ', prefix=prefix)
return ''.join([prefix, body, ')'])
@property
def data(self):
"""
`~numpy.ndarray`-like : The stored dataset.
"""
return self._data
@property
def mask(self):
"""
any type : Mask for the dataset, if any.
Masks should follow the ``numpy`` convention that valid data points are
marked by ``False`` and invalid ones with ``True``.
"""
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
@property
def unit(self):
"""
`~astropy.units.Unit` : Unit for the dataset, if any.
"""
return self._unit
@property
def wcs(self):
"""
any type : A world coordinate system (WCS) for the dataset, if any.
"""
return self._wcs
@property
def uncertainty(self):
"""
any type : Uncertainty in the dataset, if any.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``'std'`` for standard deviation or
``'var'`` for variance. A metaclass defining such an interface is
`~astropy.nddata.NDUncertainty` but isn't mandatory.
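        An uncertainty without such an attribute is converted (after an
        informational message) to an `UnknownUncertainty`; a small
        illustration::
        >>> from astropy.nddata import NDData
        >>> ndd = NDData([1, 2, 3], uncertainty=[0.1, 0.1, 0.1])
        INFO: uncertainty should have attribute uncertainty_type. [astropy.nddata.nddata]
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        UnknownUncertainty([0.1, 0.1, 0.1])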
"""
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
            # There is one requirement on the uncertainty: that
            # it has an attribute 'uncertainty_type'.
# If it does not match this requirement convert it to an unknown
# uncertainty.
if not hasattr(value, 'uncertainty_type'):
log.info('uncertainty should have attribute uncertainty_type.')
value = UnknownUncertainty(value, copy=False)
# If it is a subclass of NDUncertainty we must set the
# parent_nddata attribute. (#4152)
if isinstance(value, NDUncertainty):
            # In case the uncertainty already has a parent, create a new
            # instance because we need to assume that we don't want to
            # steal the uncertainty from another NDData object.
if value._parent_nddata is not None:
value = value.__class__(value, copy=False)
# Then link it to this NDData instance (internally this needs
# to be saved as weakref but that's done by NDUncertainty
# setter).
value.parent_nddata = self
self._uncertainty = value
|
da79e3beaa90a8b7ec3cdef68f104d319ee1f0096e09a8cf883c84a12b812aa4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The `astropy.nddata` subpackage provides the `~astropy.nddata.NDData`
class and related tools to manage n-dimensional array-based data (e.g.
CCD images, IFU data, grid-based simulation data, ...). These objects are
more than plain `numpy.ndarray` objects, because they carry metadata that
cannot easily be attached to a single array.
"""
from .nddata import *
from .nddata_base import *
from .nddata_withmixins import *
from .nduncertainty import *
from .flag_collection import *
from .decorators import *
from .mixins.ndarithmetic import *
from .mixins.ndslicing import *
from .mixins.ndio import *
from .compat import *
from .utils import *
from .ccddata import *
from .bitmask import *
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.nddata`.
"""
warn_unsupported_correlated = _config.ConfigItem(
True,
'Whether to issue a warning if `~astropy.nddata.NDData` arithmetic '
'is performed with uncertainties and the uncertainties do not '
'support the propagation of correlated uncertainties.'
)
warn_setting_unit_directly = _config.ConfigItem(
True,
        'Whether to issue a warning when the `~astropy.nddata.NDData` unit '
        'attribute is changed from a non-``None`` value to another value; '
        'data values/uncertainties are not scaled with the unit change.'
)
conf = Conf()
|
235f4383f5fe09d56b61960733522bc8b1ba2de97dcab7ca54c14dee8e68b821 | """
A module that provides functions for manipulating bit masks and data quality
(DQ) arrays.
"""
import sys
import warnings
import numbers
import numpy as np
__all__ = ['bitfield_to_boolean_mask', 'interpret_bit_flags']
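# Largest bit mask representable by the widest supported unsigned integer
# type; used below to clip bit masks to the supported range of flags.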
_MAX_UINT_TYPE = np.maximum_sctype(np.uint)
_SUPPORTED_FLAGS = int(np.bitwise_not(
0, dtype=_MAX_UINT_TYPE, casting='unsafe'
))
def _is_bit_flag(n):
"""
Verifies if the input number is a bit flag (i.e., an integer number that is
an integer power of 2).
Parameters
----------
n : int
A positive integer number. Non-positive integers are considered not to
be "flags".
Returns
-------
bool
``True`` if input ``n`` is a bit flag and ``False`` if it is not.
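    Examples
    --------
    For illustration, 8 sets a single bit (``0b1000``) while 12 sets two
    bits (``0b1100``)::
    >>> _is_bit_flag(8)
    True
    >>> _is_bit_flag(12)
    False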
"""
if n < 1:
return False
return bin(n).count('1') == 1
def _is_int(n):
return (
(isinstance(n, numbers.Integral) and not isinstance(n, bool)) or
(isinstance(n, np.generic) and np.issubdtype(n, np.integer))
)
def interpret_bit_flags(bit_flags, flip_bits=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
    When input is a list of flags (either a Python list of integer flags or a
    string of comma- or '+'-separated flags), the returned bit mask
    is obtained by summing the input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
        ``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma- or
'+'-separated list of integer bit flags, or a Python list of integer
bit flags. If ``bit_flags`` is a `str` and if it is prepended with '~',
then the output bit mask will have its bits flipped (compared to simple
sum of input flags). For input ``bit_flags`` that is already a bit mask
or a Python list of bit flags, bit-flipping can be controlled through
``flip_bits`` parameter.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return (~int(bit_flags) if flip_bits else int(bit_flags))
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ['', 'NONE', 'INDEF']:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find('~')
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count('(')
nrpar = bit_flags.count(')')
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parantheses in bit flag list.")
lpar_pos = bit_flags.find('(')
rpar_pos = bit_flags.rfind(')')
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError("Incorrect syntax (incorrect use of "
"parenthesis) in bit flag list.")
bit_flags = bit_flags[1:-1].strip()
if ',' in bit_flags:
bit_flags = bit_flags.split(',')
elif '+' in bit_flags:
bit_flags = bit_flags.split('+')
else:
if bit_flags == '':
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, '__iter__'):
if not all([_is_int(flag) for flag in bit_flags]):
raise TypeError("Each bit flag in a list must be an integer.")
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError("Input list contains invalid (not powers of two) "
"bit flag: {:d}".format(v))
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
def bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None,
good_mask_value=False, dtype=np.bool_):
"""
    bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : numpy.ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (Default = 0)
An integer bit mask, a Python list of bit flags, a comma- or
'+'-separated string list of integer bit flags that indicate what
bits in the input ``bitfield`` should be *ignored* (i.e., zeroed), or
`None`.
        | Setting ``ignore_flags`` to `None` will effectively make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
          interpreted as "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
            Setting ``ignore_flags`` to 0 will effectively assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
'+'-separated list of integer bit flags that should be added together
to create an integer bit mask. For example, both ``'4,8'`` and
``'4+8'`` are equivalent and indicate that bit flags 4 and 8 in
the input ``bitfield`` array should be ignored when generating
boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
            **single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (Default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (Default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
        or 1 (if ``dtype`` is of numerical type) and values corresponding
to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
        corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (Default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
        ``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqbits = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(ignore_flags, flip_bits=flip_bits)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(ignore_mask, dtype=bitfield.dtype,
casting='unsafe')
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting='unsafe')
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
|
98949557e7121c99450fbe222e4760a857a6dc988e8cfe5dd94d8dc158034f5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from .compat import NDDataArray
from .nduncertainty import (
StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
    """Decorator factory which temporarily disables the need for a unit when
    creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = ("See `astropy.nddata.NDArithmeticMixin.{}`."
"".format(func.__name__))
return sharedmethod(inner)
return decorator
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
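    """
    Check whether ``unit`` is the uncertainty unit expected for the given
    uncertainty class and ``parent_unit`` of the data: the parent unit
    itself for standard deviation, its square for variance and its
    inverse square for inverse variance.
    """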
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit ** 2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit ** 2))
raise ValueError("unsupported uncertainty type: {}"
.format(uncertainty_type))
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
    ----------
data : `~astropy.nddata.CCDData`-like or `numpy.ndarray`-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
            If the unit is ``None`` or not otherwise specified, a
            ``ValueError`` will be raised.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
        ``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
    This is useful, for example, when plotting a 2D image using
    matplotlib::
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
        # Check if a unit is set. This can be temporarily disabled by the
        # _arithmetic decorator, which toggles _config_ccd_requires_unit.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
                raise TypeError("uncertainty must be an instance of "
                                "NDUncertainty or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True, key_uncertainty_type='UTYPE'):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
            If it is a string, append this attribute to the HDUList as an
            `~astropy.io.fits.ImageHDU` with the string as the extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
Raises
        ------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
            - If ``self.uncertainty`` is set but not an astropy uncertainty type.
            - If ``self.uncertainty`` is set but has a unit differing from
              ``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
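        Examples
        --------
        A minimal sketch with neither mask nor uncertainty set, so only the
        primary HDU is created::
        >>> from astropy.nddata import CCDData
        >>> ccd = CCDData([[1, 2], [3, 4]], unit='adu')
        >>> hdulist = ccd.to_hdu()
        >>> len(hdulist)
        1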
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information about which uncertainty
            # class was used so that loading the HDUList can infer the
            # uncertainty type. Only the known uncertainty classes are allowed.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError('only uncertainties of type {} can be saved.'
.format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
            # Assuming the uncertainty is one of the known classes, save just
            # the array. This might be problematic if the uncertainty has a
            # unit differing from the data, so abort for different units.
            # This is important for astropy > 1.2.
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None):
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
'saving uncertainties with a unit that is not '
'equivalent to the unit from the data unit is not '
'supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
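    # Arithmetic methods are inherited from NDArithmeticMixin via
    # NDDataArray; wrap each one with ``_arithmetic`` so the unit
    # requirement is relaxed during the operation and the result is
    # re-wrapped as a CCDData instance.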
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta['HIERARCH {0}'.format(key.upper())] = (
short_name, "Shortened name for {}".format(key))
self.meta[short_name] = value
else:
self.meta[key] = value
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
_PCs = set(['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2'])
_CDs = set(['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'])
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
    hdr : `~astropy.io.fits.Header` or other dict-like
    Returns
    -------
    new_header : `~astropy.io.fits.Header`
        Copy of the input header with WCS-specific keywords removed.
    wcs : `~astropy.wcs.WCS` or None
        The generated WCS, or `None` if no valid WCS could be constructed.
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
        log.info('An exception happened while extracting WCS information from '
                 'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = '{}_{}_{}'
polynomials = ['A', 'B', 'AP', 'BP']
for poly in polynomials:
            order = getattr(wcs.sip, '{}_order'.format(poly.lower()))
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j),
ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None,
key_uncertainty_type='UTYPE', **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, optional
        FITS extension from which CCDData should be initialized. If zero and
        there is no data in the primary extension, it will search for the first
        extension with data. The header will be added to the primary header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and there is
        also a unit for the image in the FITS header (the keyword ``BUNIT``
        is used as the unit, if present), this argument takes precedence.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
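    Examples
    --------
    A sketch of typical use (``'image.fits'`` is a placeholder file name)::
    >>> from astropy.nddata import fits_ccddata_reader
    >>> ccd = fits_ccddata_reader('image.fits', unit='adu')  # doctest: +SKIP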
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = 'unsupported keyword: {0}.'.format(key)
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None')
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
        # search for the first extension with data if
        # the primary HDU contains no data.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (hdus.info(hdu)[i][3] == 'ImageHDU' and
hdus.fileinfo(i)['datSpan'] > 0):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info("first HDU with data is extension "
"{0}.".format(hdu))
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
'The Header value for the key BUNIT ({}) cannot be '
'interpreted as valid unit. To successfully read the '
'file as CCDData you can pass in a valid `unit` '
'argument explicitly or change the header of the FITS '
'file before reading it.'
.format(fits_unit_string))
else:
log.info("using the unit {0} passed to the FITS reader instead "
"of the unit {1} in the FITS file."
.format(unit, fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
def fits_ccddata_writer(
ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, key_uncertainty_type='UTYPE', **kwd):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
        If it is a string, append this attribute to the HDUList as an
        `~astropy.io.fits.ImageHDU` with the string as the extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
    ------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has a unit differing from
          ``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
|
d06ae2ebf340e62f15cae3bb365dbd093f0ef073270b3c9776d5946311a7217b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import numpy as np
from .decorators import support_nddata
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.utils import lazyproperty
from astropy.wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from astropy.wcs import Sip
__all__ = ['extract_array', 'add_array', 'subpixel_indices',
'overlap_slices', 'block_reduce', 'block_replicate',
'NoOverlapError', 'PartialOverlapError', 'Cutout2D']
class NoOverlapError(ValueError):
'''Raised when determining the overlap of non-overlapping arrays.'''
pass
class PartialOverlapError(ValueError):
'''Raised when arrays only partially overlap.'''
pass
def overlap_slices(large_array_shape, small_array_shape, position,
mode='partial'):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : tuple of int or int
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slices
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slices
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
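    Examples
    --------
    As an illustration, take the slices for a centered 3x3 region of a
    5x5 array::
    >>> from astropy.nddata.utils import overlap_slices
    >>> slices_large, slices_small = overlap_slices((5, 5), (3, 3), (2, 2))
    >>> slices_large
    (slice(1, 4, None), slice(1, 4, None))
    >>> slices_small
    (slice(0, 3, None), slice(0, 3, None))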
"""
if mode not in ['partial', 'trim', 'strict']:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape, )
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape, )
if np.isscalar(position):
position = (position, )
if len(small_array_shape) != len(large_array_shape):
raise ValueError('"large_array_shape" and "small_array_shape" must '
'have the same number of dimensions.')
if len(small_array_shape) != len(position):
raise ValueError('"position" must have the same number of dimensions '
'as "small_array_shape".')
# define the min/max pixel indices
indices_min = [int(np.ceil(pos - (small_shape / 2.)))
for (pos, small_shape) in zip(position, small_array_shape)]
indices_max = [int(np.ceil(pos + (small_shape / 2.)))
for (pos, small_shape) in zip(position, small_array_shape)]
for e_max in indices_max:
if e_max <= 0:
raise NoOverlapError('Arrays do not overlap.')
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError('Arrays do not overlap.')
if mode == 'strict':
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError('Arrays overlap only partially.')
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max >= large_shape:
raise PartialOverlapError('Arrays overlap only partially.')
# Set up slices
slices_large = tuple(slice(max(0, indices_min),
min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in
zip(indices_min, indices_max, large_array_shape))
if mode == 'trim':
slices_small = tuple(slice(0, slc.stop - slc.start)
for slc in slices_large)
else:
slices_small = tuple(slice(max(0, -indices_min),
min(large_shape - indices_min,
indices_max - indices_min))
for (indices_min, indices_max, large_shape) in
zip(indices_min, indices_max, large_array_shape))
return slices_large, slices_small
def extract_array(array_large, shape, position, mode='partial',
fill_value=np.nan, return_position=False):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : `~numpy.ndarray`
The array from which to extract the small array.
shape : tuple or int
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` must have the same ``dtype`` as the
``array_large`` array.
return_position : boolean, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : `~numpy.ndarray`
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
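    If the requested region extends past the edge of the large array, the
    default ``'partial'`` mode fills the missing pixels (a minimal sketch;
    ``fill_value=-99`` is chosen to match the integer dtype here):
    >>> extract_array(large_array, (3, 5), (0, 0), fill_value=-99)
    array([[-99, -99, -99, -99, -99],
           [-99, -99,   0,   1,   2],
           [-99, -99,  10,  11,  12]])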
"""
if np.isscalar(shape):
shape = (shape, )
if np.isscalar(position):
position = (position, )
if mode not in ['partial', 'trim', 'strict']:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(array_large.shape,
shape, position, mode=mode)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
    # Extracting on the edges is presumably a rare case, so treat it
    # specially here
if (extracted_array.shape != shape) and (mode == 'partial'):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
extracted_array[:] = fill_value
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position,
small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : `~numpy.ndarray`
Large array.
array_small : `~numpy.ndarray`
Small array to add.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : `~numpy.ndarray`
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
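    Because the addition is done in-place, ``large_array`` itself now
    holds the sum:
    >>> large_array[0, 1]  # doctest: +FLOAT_CMP
    1.0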
"""
# Check if large array is really larger
if all(large_shape > small_shape for (large_shape, small_shape)
in zip(array_large.shape, array_small.shape)):
large_slices, small_slices = overlap_slices(array_large.shape,
array_small.shape,
position)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : `~numpy.ndarray` or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : `~numpy.ndarray`
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
    If instead we use a subsampling of 2, the subpixel index is 1 for the
    first two values (1.2 and 3.4) and 0 for 5.5. This is because pixel
    centers lie at integer positions, so 1.2 and 3.4 fall in the right
    half of the pixels centered at 1 and 3, while 5.5 falls at the start
    (left half) of the pixel centered at 6.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
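    # e.g. for position 1.2 with subsampling 2: modf(1.2 + 0.5)[0] = 0.7
    # and floor(0.7 * 2) = 1, i.e. the right half of the pixel centered at 1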
return np.floor(fractions * subsampling)
@support_nddata
def block_reduce(data, block_size, func=np.sum):
"""
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array_like
The data to be resampled.
block_size : int or array_like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a `~numpy.ndarray` along with an ``axis`` keyword,
which defines the axis along which the function is applied. The
default is `~numpy.sum`, which provides block summation (and
conserves the data sum).
Returns
-------
output : array-like
The resampled data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +SKIP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +SKIP
array([[ 2.5, 4.5],
[ 10.5, 12.5]])
"""
from skimage.measure import block_reduce
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('`block_size` must be a scalar or have the same '
'length as `data.shape`')
block_size = np.array([int(i) for i in block_size])
size_resampled = np.array(data.shape) // block_size
size_init = size_resampled * block_size
# trim data if necessary
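    # (e.g. a (5, 5) array with block_size (2, 2) is trimmed to (4, 4),
    # dropping the trailing row and column before the reduction)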
for i in range(data.ndim):
if data.shape[i] != size_init[i]:
data = data.swapaxes(0, i)
data = data[:size_init[i]]
data = data.swapaxes(0, i)
return block_reduce(data, tuple(block_size), func=func)
@support_nddata
def block_replicate(data, block_size, conserve_sum=True):
"""
Upsample a data array by block replication.
Parameters
----------
data : array_like
The data to be block replicated.
block_size : int or array_like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array_like
The block-replicated data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]])
"""
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('`block_size` must be a scalar or have the same '
'length as `data.shape`')
for i in range(data.ndim):
data = np.repeat(data, block_size[i], axis=i)
if conserve_sum:
data = data / float(np.prod(block_size))
return data
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<http://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : `~numpy.ndarray`
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
        an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : 2 tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : 2 tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : 2 tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : 2 tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or `None`
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(self, data, position, size, wcs=None, mode='trim',
fill_value=np.nan, copy=False):
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError('wcs must be input if position is a '
'SkyCoord')
position = skycoord_to_pixel(position, wcs, mode='all') # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError('size must have at most two elements')
shape = np.zeros(2).astype(int)
pixel_scales = None
# ``size`` can have a mixture of int and Quantity (and even units),
# so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == 'angle':
if wcs is None:
raise ValueError('wcs must be input if any element '
'of size has angular units')
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis])
shape[axis] = int(np.round(
(side / pixel_scales[axis]).decompose()))
else:
raise ValueError('shape can contain Quantities with only '
'pixel or angular units')
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value,
return_position=True)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
((self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original)) = self.bbox_original
((self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs.array_shape = self.data.shape
if wcs.sip is not None:
self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b,
wcs.sip.ap, wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
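        Examples
        --------
        A minimal, self-contained sketch (the cutout's origin in the
        original array is ``(1, 1)`` in this case):
        >>> import numpy as np
        >>> from astropy.nddata.utils import Cutout2D
        >>> cutout = Cutout2D(np.arange(20.).reshape(5, 4), (2, 2), (3, 3))
        >>> cutout.to_original_position((0, 0))
        (1, 1)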
"""
return tuple(cutout_position[i] + self.origin_original[i]
for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i]
for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
kwargs['fill'] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2., height / 2.
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1)
for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return ((slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1))
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@staticmethod
def _round(a):
"""
Round the input to the nearest integer.
If two integers are equally close, the value is rounded up.
        Note that this is different from `np.round`, which rounds halfway
        cases to the nearest even integer.
"""
return int(np.floor(a + 0.5))
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (self._round(self.input_position_original[0]),
self._round(self.input_position_original[1]))
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (self._round(self.input_position_cutout[0]),
self._round(self.input_position_cutout[1]))
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
|
07f391ac338a4889689b90aafe17bb5907087fff4e42ba6f0143ff3fab93dfba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains a class equivalent to pre-1.0 NDData.
import numpy as np
from astropy.units import UnitsError, UnitConversionError, Unit
from astropy import log
from .nddata import NDData
from .nduncertainty import NDUncertainty
from .mixins.ndslicing import NDSlicingMixin
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .flag_collection import FlagCollection
__all__ = ['NDDataArray']
class NDDataArray(NDArithmeticMixin, NDSlicingMixin, NDIOMixin, NDData):
"""
An ``NDData`` object with arithmetic. This class is functionally equivalent
to ``NDData`` in astropy versions prior to 1.0.
The key distinction from raw numpy arrays is the presence of
additional metadata such as uncertainties, a mask, units, flags,
and/or a coordinate system.
See also: http://docs.astropy.org/en/stable/nddata/
Parameters
    ----------
    data : `~numpy.ndarray` or `NDData`
        The actual data contained in this `NDData` object. Note that the
        data is always held by *reference*, so you should make a copy of
        the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : `~numpy.ndarray`-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
flags : `~numpy.ndarray`-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : undefined, optional
WCS-object containing the world coordinate system for the data.
.. warning::
This is not yet defined because the discussion of how best to
represent this class's WCS system generically is still under
            consideration. For now, just leave it as `None`.
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
Raises
------
    ValueError
If the `uncertainty` or `mask` inputs cannot be broadcast (e.g., match
shape) onto ``data``.
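    Examples
    --------
    A minimal usage sketch (any `~astropy.nddata.NDUncertainty` subclass
    works; `~astropy.nddata.StdDevUncertainty` is used here):
    >>> import numpy as np
    >>> from astropy.nddata import NDDataArray, StdDevUncertainty
    >>> ndd = NDDataArray([1., 2., 3.], unit='adu',
    ...                   uncertainty=StdDevUncertainty([0.1, 0.2, 0.3]))
    >>> ndd.unit
    Unit("adu")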
"""
def __init__(self, data, *args, flags=None, **kwargs):
# Initialize with the parent...
super().__init__(data, *args, **kwargs)
# ...then reset uncertainty to force it to go through the
# setter logic below. In base NDData all that is done is to
# set self._uncertainty to whatever uncertainty is passed in.
self.uncertainty = self._uncertainty
# Same thing for mask.
self.mask = self._mask
        # Initialize flags, because they are no longer handled in NDData
        # or NDDataBase.
if isinstance(data, NDDataArray):
if flags is None:
flags = data.flags
else:
log.info("Overwriting NDDataArrays's current "
"flags with specified flags")
self.flags = flags
# Implement uncertainty as NDUncertainty to support propagation of
# uncertainties in arithmetic operations
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
class_name = self.__class__.__name__
if not self.unit and value._unit:
# Raise an error if uncertainty has unit and data does not
raise ValueError("Cannot assign an uncertainty with unit "
"to {0} without "
"a unit".format(class_name))
self._uncertainty = value
self._uncertainty.parent_nddata = self
else:
raise TypeError("Uncertainty must be an instance of "
"a NDUncertainty object")
else:
self._uncertainty = value
# Override unit so that we can add a setter.
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
from . import conf
try:
if self._unit is not None and conf.warn_setting_unit_directly:
log.info('Setting the unit directly changes the unit without '
'updating the data or uncertainty. Use the '
'.convert_unit_to() method to change the unit and '
'scale values appropriately.')
except AttributeError:
# raised if self._unit has not been set yet, in which case the
# warning is irrelevant
pass
if value is None:
self._unit = None
else:
self._unit = Unit(value)
# Implement mask in a way that converts nicely to a numpy masked array
@property
def mask(self):
if self._mask is np.ma.nomask:
return None
else:
return self._mask
@mask.setter
def mask(self, value):
# Check that value is not either type of null mask.
if (value is not None) and (value is not np.ma.nomask):
mask = np.array(value, dtype=np.bool_, copy=False)
if mask.shape != self.data.shape:
raise ValueError("dimensions of mask do not match data")
else:
self._mask = mask
else:
# internal representation should be one numpy understands
self._mask = np.ma.nomask
@property
def shape(self):
"""
shape tuple of this object's data.
"""
return self.data.shape
@property
def size(self):
"""
integer size of this object's data.
"""
return self.data.size
@property
def dtype(self):
"""
`numpy.dtype` of this object's data.
"""
return self.data.dtype
@property
def ndim(self):
"""
integer dimensions of this object's data
"""
return self.data.ndim
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if value is not None:
if isinstance(value, FlagCollection):
if value.shape != self.shape:
raise ValueError("dimensions of FlagCollection does not match data")
else:
self._flags = value
else:
flags = np.array(value, copy=False)
if flags.shape != self.shape:
raise ValueError("dimensions of flags do not match data")
else:
self._flags = flags
else:
self._flags = value
def __array__(self):
"""
This allows code that requests a Numpy array to use an NDData
object as a Numpy array.
"""
if self.mask is not None:
return np.ma.masked_array(self.data, self.mask)
else:
return np.array(self.data)
def __array_prepare__(self, array, context=None):
"""
This ensures that a masked array is returned if self is masked.
"""
if self.mask is not None:
return np.ma.masked_array(array, self.mask)
else:
return array
def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
UnitsError
If units are inconsistent.
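        Examples
        --------
        A minimal sketch, converting meters to centimeters:
        >>> from astropy.nddata import NDDataArray
        >>> ndd = NDDataArray([1., 2.], unit='m')
        >>> ndd.convert_unit_to('cm').data  # doctest: +FLOAT_CMP
        array([100., 200.])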
"""
if self.unit is None:
raise ValueError("No unit specified on source data")
data = self.unit.to(unit, self.data, equivalencies=equivalencies)
if self.uncertainty is not None:
uncertainty_values = self.unit.to(unit, self.uncertainty.array,
equivalencies=equivalencies)
# should work for any uncertainty class
uncertainty = self.uncertainty.__class__(uncertainty_values)
else:
uncertainty = None
if self.mask is not None:
new_mask = self.mask.copy()
else:
new_mask = None
# Call __class__ in case we are dealing with an inherited type
result = self.__class__(data, uncertainty=uncertainty,
mask=new_mask,
wcs=self.wcs,
meta=self.meta, unit=unit)
return result
|
af2ba7122ffbffec24826a4ec9b6c789acc4ea7a22c7ceb296b0b37ecc74dc16 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .core import *
|
c0d3dcfbef99d6da9ed0e59dac27386a24467eb5f4256552b9d00d83ee24e93f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module's main purpose is to act as a script to create new versions
of ufunc.c when ERFA is updated (or this generator is enhanced).
`Jinja2 <http://jinja.pocoo.org/>`_ must be installed for this
module/script to function.
Note that this does *not* currently automate the process of creating structs
or dtypes for those structs. They should be added manually in the template file.
"""
# note that we do *not* use unicode_literals here, because that makes the
# generated code's strings have u'' in them on py 2.x
import re
import os.path
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy
# Note: once we support only numpy >=1.16, all things related to "d3_fix"
# can be removed, here and in the templates (core.py.templ)
# NOTE: we define this variable here instead of importing from astropy to
# ensure that running this script does not require importing astropy.
NUMPY_LT_1_16 = LooseVersion(numpy.__version__) < '1.16'
DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0],
'../../cextern/erfa')
DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0]
NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')'))
class FunctionDoc:
def __init__(self, doc):
self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "")
self.__input = None
self.__output = None
self.__ret_info = None
def _get_arg_doc_list(self, doc_lines):
"""Parse input/output doc section lines, getting arguments from them.
Ensure all elements of eraASTROM and eraLDBODY are left out, as those
are not input or output arguments themselves. Also remove the nb
        argument in front of eraLDBODY, as we infer nb from the python array.
"""
doc_list = []
skip = []
for d in doc_lines:
arg_doc = ArgumentDoc(d)
if arg_doc.name is not None:
if skip:
if skip[0] == arg_doc.name:
skip.pop(0)
continue
else:
raise RuntimeError("We whould be skipping {} "
"but {} encountered."
.format(skip[0], arg_doc.name))
if arg_doc.type.startswith('eraLDBODY'):
# Special-case LDBODY: for those, the previous argument
# is always the number of bodies, but we don't need it
# as an input argument for the ufunc since we're going
# to determine this from the array itself. Also skip
# the description of its contents; those are not arguments.
doc_list.pop()
skip = ['bm', 'dl', 'pv']
elif arg_doc.type.startswith('eraASTROM'):
# Special-case ASTROM: need to skip the description
# of its contents; those are not arguments.
skip = ['pmt', 'eb', 'eh', 'em', 'v', 'bm1',
'bpn', 'along', 'xpl', 'ypl', 'sphi',
'cphi', 'diurab', 'eral', 'refa', 'refb']
doc_list.append(arg_doc)
return doc_list
@property
def input(self):
if self.__input is None:
self.__input = []
for regex in ("Given([^\n]*):\n(.+?) \n",
"Given and returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__input += self._get_arg_doc_list(doc_lines)
return self.__input
@property
def output(self):
if self.__output is None:
self.__output = []
for regex in ("Given and returned([^\n]*):\n(.+?) \n",
"Returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__output += self._get_arg_doc_list(doc_lines)
return self.__output
@property
def ret_info(self):
if self.__ret_info is None:
ret_info = []
result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) \n", self.doc, re.DOTALL)
if result is not None:
ret_info.append(ReturnDoc(result.group(2)))
if len(ret_info) == 0:
self.__ret_info = ''
elif len(ret_info) == 1:
self.__ret_info = ret_info[0]
else:
raise ValueError("Multiple C return sections found in this doc:\n" + self.doc)
return self.__ret_info
def __repr__(self):
return self.doc.replace(" \n", "\n")
class ArgumentDoc:
def __init__(self, doc):
match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc)
if match is not None:
self.name = match.group(1)
self.type = match.group(2)
self.doc = match.group(3)
else:
self.name = None
self.type = None
self.doc = None
def __repr__(self):
return " {0:15} {1:15} {2}".format(self.name, self.type, self.doc)
class Variable:
"""Properties shared by Argument and Return."""
@property
def npy_type(self):
"""Predefined type used by numpy ufuncs to indicate a given ctype.
Eg., NPY_DOUBLE for double.
"""
return "NPY_" + self.ctype.upper()
@property
def dtype(self):
"""Name of dtype corresponding to the ctype.
Specifically,
        double       : dt_double
        int          : dt_int
        double[3]    : dt_double
        double[2][3] : dt_pv
        double[2]    : dt_pvdpv
        double[3][3] : dt_double
        int[4]       : dt_ymdf | dt_hmsf | dt_dmsf, depending on name
eraASTROM: dt_eraASTROM
eraLDBODY: dt_eraLDBODY
char : dt_sign
char[] : dt_type
The corresponding dtypes are defined in ufunc.c, where they are
used for the loop definitions. In core.py, they are also used
to view-cast regular arrays to these structured dtypes.
"""
if self.ctype == 'const char':
return 'dt_type'
elif self.ctype == 'char':
return 'dt_sign'
elif self.ctype == 'int' and self.shape == (4,):
return 'dt_' + self.name[1:]
elif self.ctype == 'double' and self.shape == (3,):
return 'dt_double'
elif self.ctype == 'double' and self.shape == (2, 3):
return 'dt_pv'
elif self.ctype == 'double' and self.shape == (2,):
return 'dt_pvdpv'
elif self.ctype == 'double' and self.shape == (3, 3):
return 'dt_double'
elif not self.shape:
return 'dt_' + self.ctype
else:
raise ValueError("ctype {} with shape {} not recognized."
.format(self.ctype, self.shape))
@property
def view_dtype(self):
"""Name of dtype corresponding to the ctype for viewing back as array.
E.g., dt_double for double, dt_double33 for double[3][3].
The types are defined in core.py, where they are used for view-casts
of structured results as regular arrays.
"""
if self.ctype == 'const char':
return 'dt_bytes12'
elif self.ctype == 'char':
return 'dt_bytes1'
else:
raise ValueError('Only char ctype should need view back!')
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
size = 1
for s in self.shape:
size *= s
return size
@property
def cshape(self):
return ''.join(['[{0}]'.format(s) for s in self.shape])
@property
def signature_shape(self):
if self.ctype == 'eraLDBODY':
return '(n)'
elif self.ctype == 'double' and self.shape == (3,):
return '(d3)' if NUMPY_LT_1_16 else '(3)'
elif self.ctype == 'double' and self.shape == (3, 3):
return '(d3, d3)' if NUMPY_LT_1_16 else '(3, 3)'
else:
return '()'
class Argument(Variable):
def __init__(self, definition, doc):
self.definition = definition
self.doc = doc
self.__inout_state = None
self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1)
if "*" == ptr_name_arr[0]:
self.is_ptr = True
name_arr = ptr_name_arr[1:]
else:
self.is_ptr = False
name_arr = ptr_name_arr
if "[]" in ptr_name_arr:
self.is_ptr = True
name_arr = name_arr[:-2]
if "[" in name_arr:
self.name, arr = name_arr.split("[", 1)
self.shape = tuple([int(size) for size in arr[:-1].split("][")])
else:
self.name = name_arr
self.shape = ()
@property
def inout_state(self):
if self.__inout_state is None:
self.__inout_state = ''
for i in self.doc.input:
if self.name in i.name.split(','):
self.__inout_state = 'in'
for o in self.doc.output:
if self.name in o.name.split(','):
if self.__inout_state == 'in':
self.__inout_state = 'inout'
else:
self.__inout_state = 'out'
return self.__inout_state
@property
def name_for_call(self):
"""How the argument should be used in the call to the ERFA function.
This takes care of ensuring that inputs are passed by value,
as well as adding back the number of bodies for any LDBODY argument.
The latter presumes that in the ufunc inner loops, that number is
called 'nb'.
"""
if self.ctype == 'eraLDBODY':
assert self.name == 'b'
return 'nb, _' + self.name
elif self.is_ptr:
return '_'+self.name
else:
return '*_'+self.name
def __repr__(self):
return "Argument('{0}', name='{1}', ctype='{2}', inout_state='{3}')".format(self.definition, self.name, self.ctype, self.inout_state)
class ReturnDoc:
def __init__(self, doc):
self.doc = doc
self.infoline = doc.split('\n')[0].strip()
self.type = self.infoline.split()[0]
self.descr = self.infoline.split()[1]
if self.descr.startswith('status'):
self.statuscodes = statuscodes = {}
code = None
for line in doc[doc.index(':')+1:].split('\n'):
ls = line.strip()
if ls != '':
if ' = ' in ls:
code, msg = ls.split(' = ')
if code != 'else':
code = int(code)
statuscodes[code] = msg
elif code is not None:
statuscodes[code] += ls
else:
self.statuscodes = None
def __repr__(self):
return "Return value, type={0:15}, {1}, {2}".format(self.type, self.descr, self.doc)
class Return(Variable):
def __init__(self, ctype, doc):
self.name = 'c_retval'
self.inout_state = 'stat' if ctype == 'int' else 'ret'
self.ctype = ctype
self.shape = ()
self.doc = doc
def __repr__(self):
return "Return(name='{0}', ctype='{1}', inout_state='{2}')".format(self.name, self.ctype, self.inout_state)
@property
def doc_info(self):
return self.doc.ret_info
class Function:
"""
A class representing a C function.
Parameters
----------
name : str
The name of the function
source_path : str
Either a directory, which means look for the function in a
stand-alone file (like for the standard ERFA distribution), or a
file, which means look for the function in that file (as for the
astropy-packaged single-file erfa.c).
match_line : str, optional
If given, searching of the source file will skip until it finds
a line matching this string, and start from there.
"""
def __init__(self, name, source_path, match_line=None):
self.name = name
self.pyname = name.split('era')[-1].lower()
self.filename = self.pyname+".c"
if os.path.isdir(source_path):
self.filepath = os.path.join(os.path.normpath(source_path), self.filename)
else:
self.filepath = source_path
with open(self.filepath) as f:
if match_line:
line = f.readline()
while line != '':
if line.startswith(match_line):
filecontents = '\n' + line + f.read()
break
line = f.readline()
else:
msg = ('Could not find the match_line "{0}" in '
'the source file "{1}"')
raise ValueError(msg.format(match_line, self.filepath))
else:
filecontents = f.read()
pattern = r"\n([^\n]+{0} ?\([^)]+\)).+?(/\*.+?\*/)".format(name)
p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE)
search = p.search(filecontents)
self.cfunc = " ".join(search.group(1).split())
self.doc = FunctionDoc(search.group(2))
self.args = []
for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.search("^(.*){0}".format(name), self.cfunc).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def args_by_inout(self, inout_filter, prop=None, join=None):
"""
Gives all of the arguments and/or returned values, depending on whether
they are inputs, outputs, etc.
The value for `inout_filter` should be a string containing anything
that arguments' `inout_state` attribute produces. Currently, that can be:
* "in" : input
* "out" : output
* "inout" : something that's could be input or output (e.g. a struct)
* "ret" : the return value of the C function
* "stat" : the return value of the C function if it is a status code
It can also be a "|"-separated string giving inout states to OR
together.
"""
result = []
for arg in self.args:
if arg.inout_state in inout_filter.split('|'):
if prop is None:
result.append(arg)
else:
result.append(getattr(arg, prop))
if join is not None:
return join.join(result)
else:
return result
@property
def user_dtype(self):
"""The non-standard dtype, if any, needed by this function's ufunc.
This would be any structured array for any input or output, but
we give preference to LDBODY, since that also decides that the ufunc
should be a generalized ufunc.
"""
user_dtype = None
for arg in self.args_by_inout('in|inout|out'):
if arg.ctype == 'eraLDBODY':
return arg.dtype
elif user_dtype is None and arg.dtype not in ('dt_double',
'dt_int'):
user_dtype = arg.dtype
return user_dtype
@property
def signature(self):
"""Possible signature, if this function should be a gufunc."""
if all(arg.signature_shape == '()'
for arg in self.args_by_inout('in|inout|out')):
return None
return '->'.join(
[','.join([arg.signature_shape for arg in args])
for args in (self.args_by_inout('in|inout'),
self.args_by_inout('inout|out|ret|stat'))])
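    # Illustrative sketch (hypothetical shapes): a function taking a
    # 3-vector and a scalar and returning a 3-vector gets the gufunc
    # signature '(3),()->(3)' ('(d3),()->(d3)' when NUMPY_LT_1_16).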
def _d3_fix_arg_and_index(self):
if not any('d3' in arg.signature_shape
for arg in self.args_by_inout('in|inout')):
for j, arg in enumerate(self.args_by_inout('out')):
if 'd3' in arg.signature_shape:
return j, arg
return None, None
@property
def d3_fix_op_index(self):
"""Whether only output arguments have a d3 dimension."""
index = self._d3_fix_arg_and_index()[0]
if index is not None:
len_in = len(list(self.args_by_inout('in')))
len_inout = len(list(self.args_by_inout('inout')))
            index += len_in + 2 * len_inout
return index
@property
def d3_fix_arg(self):
"""Whether only output arguments have a d3 dimension."""
return self._d3_fix_arg_and_index()[1]
@property
def python_call(self):
outnames = [arg.name for arg in self.args_by_inout('inout|out|stat|ret')]
argnames = [arg.name for arg in self.args_by_inout('in|inout')]
argnames += [arg.name for arg in self.args_by_inout('inout')]
d3fix_index = self._d3_fix_arg_and_index()[0]
if d3fix_index is not None:
argnames += ['None'] * d3fix_index + [self.d3_fix_arg.name]
return '{out} = {func}({args})'.format(out=', '.join(outnames),
func='ufunc.' + self.pyname,
args=', '.join(argnames))
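    # For example, for an ERFA function like eraAb (inputs pnat, v, s,
    # bm1; one output ppr) this property renders the string:
    #     ppr = ufunc.ab(pnat, v, s, bm1)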
def __repr__(self):
return "Function(name='{0}', pyname='{1}', filename='{2}', filepath='{3}')".format(self.name, self.pyname, self.filename, self.filepath)
class Constant:
def __init__(self, name, value, doc):
self.name = name.replace("ERFA_", "")
self.value = value.replace("ERFA_", "")
self.doc = doc
class ExtraFunction(Function):
"""
An "extra" function - e.g. one not following the SOFA/ERFA standard format.
Parameters
----------
cname : str
The name of the function in C
prototype : str
The prototype for the function (usually derived from the header)
pathfordoc : str
The path to a file that contains the prototype, with the documentation
as a multiline string *before* it.
"""
def __init__(self, cname, prototype, pathfordoc):
self.name = cname
self.pyname = cname.split('era')[-1].lower()
self.filepath, self.filename = os.path.split(pathfordoc)
self.prototype = prototype.strip()
if prototype.endswith('{') or prototype.endswith(';'):
self.prototype = prototype[:-1].strip()
incomment = False
lastcomment = None
with open(pathfordoc, 'r') as f:
for l in f:
if incomment:
if l.lstrip().startswith('*/'):
incomment = False
lastcomment = ''.join(lastcomment)
else:
if l.startswith('**'):
l = l[2:]
lastcomment.append(l)
else:
if l.lstrip().startswith('/*'):
incomment = True
lastcomment = []
if l.startswith(self.prototype):
self.doc = lastcomment
break
else:
raise ValueError('Did not find prototype {} in file '
'{}'.format(self.prototype, pathfordoc))
self.args = []
argset = re.search(r"{0}\(([^)]+)?\)".format(self.name),
self.prototype).group(1)
if argset is not None:
for arg in argset.split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.match("^(.*){0}".format(self.name),
self.prototype).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def __repr__(self):
r = super().__repr__()
if r.startswith('Function'):
r = 'Extra' + r
return r
def main(srcdir=DEFAULT_ERFA_LOC, outfn='core.py', ufuncfn='ufunc.c',
templateloc=DEFAULT_TEMPLATE_LOC, extra='erfa_additions.h',
verbose=True):
from jinja2 import Environment, FileSystemLoader
if verbose:
print_ = lambda *args, **kwargs: print(*args, **kwargs)
else:
print_ = lambda *args, **kwargs: None
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(templateloc))
def prefix(a_list, pre):
return [pre+'{0}'.format(an_element) for an_element in a_list]
def postfix(a_list, post):
return ['{0}'.format(an_element)+post for an_element in a_list]
def surround(a_list, pre, post):
return [pre+'{0}'.format(an_element)+post for an_element in a_list]
env.filters['prefix'] = prefix
env.filters['postfix'] = postfix
env.filters['surround'] = surround
erfa_c_in = env.get_template(ufuncfn + '.templ')
erfa_py_in = env.get_template(outfn + '.templ')
# Extract all the ERFA function names from erfa.h
if os.path.isdir(srcdir):
erfahfn = os.path.join(srcdir, 'erfa.h')
        multifile_src = True
else:
erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h')
        multifile_src = False
with open(erfahfn, "r") as f:
erfa_h = f.read()
print_("read erfa header")
if extra:
with open(os.path.join(templateloc or '.', extra), "r") as f:
erfa_h += f.read()
print_("read extra header")
funcs = OrderedDict()
section_subsection_functions = re.findall(
r'/\* (\w*)/(\w*) \*/\n(.*?)\n\n', erfa_h,
flags=re.DOTALL | re.MULTILINE)
for section, subsection, functions in section_subsection_functions:
print_("{0}.{1}".format(section, subsection))
# Right now, we compile everything, but one could be more selective.
# In particular, at the time of writing (2018-06-11), what was
        # actually required for astropy was not quite everything, but:
# ((section == 'Extra')
# or (section == "Astronomy")
# or (subsection == "AngleOps")
# or (subsection == "SphericalCartesian")
# or (subsection == "MatrixVectorProducts")
# or (subsection == 'VectorOps'))
if True:
func_names = re.findall(r' (\w+)\(.*?\);', functions,
flags=re.DOTALL)
for name in func_names:
print_("{0}.{1}.{2}...".format(section, subsection, name))
                if multifile_src:
# easy because it just looks in the file itself
cdir = (srcdir if section != 'Extra' else
templateloc or '.')
funcs[name] = Function(name, cdir)
else:
# Have to tell it to look for a declaration matching
# the start of the header declaration, otherwise it
# might find a *call* of the function instead of the
# definition
                    for line in functions.split('\n'):
if name in line:
# [:-1] is to remove trailing semicolon, and
# splitting on '(' is because the header and
# C files don't necessarily have to match
# argument names and line-breaking or
# whitespace
match_line = line[:-1].split('(')[0]
                            funcs[name] = Function(name, srcdir, match_line)
break
else:
raise ValueError("A name for a C file wasn't "
"found in the string that "
"spawned it. This should be "
"impossible!")
funcs = funcs.values()
# Extract all the ERFA constants from erfam.h
erfamhfn = os.path.join(srcdir, 'erfam.h')
with open(erfamhfn, 'r') as f:
erfa_m_h = f.read()
constants = []
for chunk in erfa_m_h.split("\n\n"):
result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk,
flags=re.DOTALL | re.MULTILINE)
if result:
doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL)
for (name, value) in result:
constants.append(Constant(name, value, doc))
# TODO: re-enable this when const char* return values and
# non-status code integer rets are possible
# #Add in any "extra" functions from erfaextra.h
# erfaextrahfn = os.path.join(srcdir, 'erfaextra.h')
# with open(erfaextrahfn, 'r') as f:
# for l in f:
# ls = l.strip()
# match = re.match('.* (era.*)\(', ls)
# if match:
# print_("Extra: {0} ...".format(match.group(1)))
# funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn))
print_("Rendering template")
erfa_c = erfa_c_in.render(funcs=funcs, NUMPY_LT_1_16=NUMPY_LT_1_16)
erfa_py = erfa_py_in.render(funcs=funcs, constants=constants,
NUMPY_LT_1_16=NUMPY_LT_1_16)
if outfn is not None:
print_("Saving to", outfn, 'and', ufuncfn)
with open(os.path.join(templateloc, outfn), "w") as f:
f.write(erfa_py)
with open(os.path.join(templateloc, ufuncfn), "w") as f:
f.write(erfa_c)
print_("Done!")
return erfa_c, erfa_py, funcs
if __name__ == '__main__':
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?',
help='Directory where the ERFA c and header files '
'can be found or to a single erfa.c file '
'(which must be in the same directory as '
'erfa.h). Defaults to the builtin astropy '
'erfa: "{0}"'.format(DEFAULT_ERFA_LOC))
ap.add_argument('-o', '--output', default='core.py',
help='The output filename for the pure-python output.')
ap.add_argument('-u', '--ufunc', default='ufunc.c',
help='The output filename for the ufunc .c output')
ap.add_argument('-t', '--template-loc',
default=DEFAULT_TEMPLATE_LOC,
help='the location where the "core.py.templ" and '
'"ufunc.c.templ templates can be found.')
ap.add_argument('-x', '--extra',
default='erfa_additions.h',
help='header file for any extra files in the template '
'location that should be included.')
ap.add_argument('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output normally printed to stdout.')
args = ap.parse_args()
    main(args.srcdir, args.output, args.ufunc, args.template_loc,
         args.extra, verbose=args.verbose)
|
ae6d6c529759da0100913099fe3f50a78dd36fa37520c7ae9c6a3aa98da883e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import glob
from distutils import log
from distutils.extension import Extension
from astropy_helpers import setup_helpers
from astropy_helpers.utils import import_file
from astropy_helpers.version_helpers import get_pkg_version_module
ERFAPKGDIR = os.path.relpath(os.path.dirname(__file__))
ERFA_SRC = os.path.abspath(os.path.join(ERFAPKGDIR, '..', '..',
'cextern', 'erfa'))
SRC_FILES = glob.glob(os.path.join(ERFA_SRC, '*'))
SRC_FILES += [os.path.join(ERFAPKGDIR, filename)
for filename in ['pav2pv.c', 'pv2pav.c', 'erfa_additions.h',
'ufunc.c.templ', 'core.py.templ',
'erfa_generator.py']]
GEN_FILES = [os.path.join(ERFAPKGDIR, 'core.py'),
os.path.join(ERFAPKGDIR, 'ufunc.c')]
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
# Generating the ERFA wrappers should only be done if needed. This also
# ensures that it is not done for any release tarball since those will
# include core.py and ufunc.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
erfa_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = import_file(os.path.join(ERFAPKGDIR, '..', 'version.py'))
if gen_mtime > erfa_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in astropy._erfa '
'seem to be older than the source templates used to '
'create them. Because this is a release version we will '
'use them anyway, but this might be a sign of some sort '
'of version mismatch or other tampering. Or it might just '
'mean you moved some files around or otherwise '
'accidentally changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"ERFA core.py and ufunc.c files will be used")
return
gen = import_file(os.path.join(ERFAPKGDIR, 'erfa_generator.py'))
gen.main(verbose=False)
def get_extensions():
sources = [os.path.join(ERFAPKGDIR, fn)
for fn in ("ufunc.c", "pav2pv.c", "pv2pav.c")]
include_dirs = ['numpy']
libraries = []
if setup_helpers.use_system_library('erfa'):
libraries.append('erfa')
else:
# get all of the .c files in the cextern/erfa directory
erfafns = os.listdir(ERFA_SRC)
sources.extend(['cextern/erfa/' + fn
for fn in erfafns if fn.endswith('.c')])
include_dirs.append('cextern/erfa')
erfa_ext = Extension(
name="astropy._erfa.ufunc",
sources=sources,
include_dirs=include_dirs,
libraries=libraries,
language="c",)
return [erfa_ext]
def get_external_libraries():
return ['erfa']
|
651dce58844a471398bd900356a07d55dc87d939b6ed885891d80fa338aaae98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
.. _wcslib: http://www.atnf.csiro.au/people/mcalabre/WCS/wcslib/index.html
.. _distortion paper: http://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf
.. _SIP: http://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/shupeADASS.pdf
.. _FITS WCS standard: https://fits.gsfc.nasa.gov/fits_wcs.html
`astropy.wcs` contains utilities for managing World Coordinate System
(WCS) transformations in FITS files. These transformations map the
pixel locations in an image to their real-world units, such as their
position on the sky sphere.
It performs three separate classes of WCS transformations:
- Core WCS, as defined in the `FITS WCS standard`_, based on Mark
Calabretta's `wcslib`_. See `~astropy.wcs.Wcsprm`.
- Simple Imaging Polynomial (`SIP`_) convention. See
`~astropy.wcs.Sip`.
- table lookup distortions as defined in WCS `distortion paper`_. See
`~astropy.wcs.DistortionLookupTable`.
Each of these transformations can be used independently or together in
a standard pipeline.
"""
from .wcs import *
from . import utils
def get_include():
"""
Get the path to astropy.wcs's C header files.
"""
import os
return os.path.join(os.path.dirname(__file__), "include")
|
c162e6066e847615be86e24f3cc51005223d25c93523ec1d731e366a15e2953a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
CONTACT = "Michael Droettboom"
EMAIL = "[email protected]"
import io
from os.path import join
import os.path
import shutil
import sys
from distutils.core import Extension
from distutils.dep_util import newer_group
from astropy_helpers.utils import import_file
from astropy_helpers import setup_helpers
from astropy_helpers.distutils_helpers import get_distutils_build_option
WCSROOT = os.path.relpath(os.path.dirname(__file__))
WCSVERSION = "6.2.0"
def b(s):
return s.encode('ascii')
def string_escape(s):
s = s.decode('ascii').encode('ascii', 'backslashreplace')
s = s.replace(b'\n', b'\\n')
s = s.replace(b'\0', b'\\0')
return s.decode('ascii')
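# A minimal sketch of the intent: string_escape(b'a\nb\x00') returns
# 'a\\nb\\0', i.e. newline and NUL bytes become the two-character
# C-style escapes \n and \0.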
def determine_64_bit_int():
"""
The only configuration parameter needed at compile-time is how to
specify a 64-bit signed integer. Python's ctypes module can get us
that information.
If we can't be absolutely certain, we default to "long long int",
which is correct on most platforms (x86, x86_64). If we find
platforms where this heuristic doesn't work, we may need to
hardcode for them.
"""
try:
try:
import ctypes
except ImportError:
raise ValueError()
if ctypes.sizeof(ctypes.c_longlong) == 8:
return "long long int"
elif ctypes.sizeof(ctypes.c_long) == 8:
return "long int"
elif ctypes.sizeof(ctypes.c_int) == 8:
return "int"
else:
raise ValueError()
except ValueError:
return "long long int"
def write_wcsconfig_h(paths):
"""
Writes out the wcsconfig.h header with local configuration.
"""
h_file = io.StringIO()
h_file.write("""
/* The bundled version has WCSLIB_VERSION */
#define HAVE_WCSLIB_VERSION 1
/* WCSLIB library version number. */
#define WCSLIB_VERSION {0}
/* 64-bit integer data type. */
#define WCSLIB_INT64 {1}
/* Windows needs some other defines to prevent inclusion of wcsset()
which conflicts with wcslib's wcsset(). These need to be set
on code that *uses* astropy.wcs, in addition to astropy.wcs itself.
*/
#if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__)
#ifndef YY_NO_UNISTD_H
#define YY_NO_UNISTD_H
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#ifndef _NO_OLDNAMES
#define _NO_OLDNAMES
#endif
#ifndef NO_OLDNAMES
#define NO_OLDNAMES
#endif
#ifndef __STDC__
#define __STDC__ 1
#endif
#endif
""".format(WCSVERSION, determine_64_bit_int()))
content = h_file.getvalue().encode('ascii')
for path in paths:
setup_helpers.write_if_different(path, content)
######################################################################
# GENERATE DOCSTRINGS IN C
def generate_c_docstrings():
docstrings = import_file(os.path.join(WCSROOT, 'docstrings.py'))
docstrings = docstrings.__dict__
keys = [
key for key, val in docstrings.items()
if not key.startswith('__') and isinstance(val, str)]
keys.sort()
docs = {}
for key in keys:
docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0'
h_file = io.StringIO()
h_file.write("""/*
DO NOT EDIT!
This file is autogenerated by astropy/wcs/setup_package.py. To edit
its contents, edit astropy/wcs/docstrings.py
*/
#ifndef __DOCSTRINGS_H__
#define __DOCSTRINGS_H__
""")
for key in keys:
val = docs[key]
h_file.write('extern char doc_{0}[{1}];\n'.format(key, len(val)))
h_file.write("\n#endif\n\n")
setup_helpers.write_if_different(
join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'),
h_file.getvalue().encode('utf-8'))
c_file = io.StringIO()
c_file.write("""/*
DO NOT EDIT!
This file is autogenerated by astropy/wcs/setup_package.py. To edit
its contents, edit astropy/wcs/docstrings.py
The docstrings are written out below as explicit arrays of byte values
rather than string literals, because some C compilers, notably MSVC,
do not support string literals greater than 256 characters.
*/
#include <string.h>
#include "astropy_wcs/docstrings.h"
""")
for key in keys:
val = docs[key]
c_file.write('char doc_{0}[{1}] = {{\n'.format(key, len(val)))
for i in range(0, len(val), 12):
section = val[i:i+12]
c_file.write(' ')
c_file.write(''.join('0x{0:02x}, '.format(x) for x in section))
c_file.write('\n')
c_file.write(" };\n\n")
setup_helpers.write_if_different(
join(WCSROOT, 'src', 'docstrings.c'),
c_file.getvalue().encode('utf-8'))
def get_wcslib_cfg(cfg, wcslib_files, include_paths):
debug = import_file(os.path.join(WCSROOT, '..', 'version.py')).debug
cfg['include_dirs'].append('numpy')
cfg['define_macros'].extend([
('ECHO', None),
('WCSTRIG_MACRO', None),
('ASTROPY_WCS_BUILD', None),
('_GNU_SOURCE', None)])
if (not setup_helpers.use_system_library('wcslib') or
sys.platform == 'win32'):
write_wcsconfig_h(include_paths)
wcslib_path = join("cextern", "wcslib") # Path to wcslib
wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files
cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files)
cfg['include_dirs'].append(wcslib_cpath)
else:
wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h')
if os.path.exists(wcsconfig_h_path):
os.unlink(wcsconfig_h_path)
cfg.update(setup_helpers.pkg_config(['wcslib'], ['wcs']))
if debug:
cfg['define_macros'].append(('DEBUG', None))
cfg['undef_macros'].append('NDEBUG')
if (not sys.platform.startswith('sun') and
not sys.platform == 'win32'):
cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"])
else:
# Define ECHO as nothing to prevent spurious newlines from
# printing within the libwcs parser
cfg['define_macros'].append(('NDEBUG', None))
cfg['undef_macros'].append('DEBUG')
if sys.platform == 'win32':
# These are written into wcsconfig.h, but that file is not
# used by all parts of wcslib.
cfg['define_macros'].extend([
('YY_NO_UNISTD_H', None),
('_CRT_SECURE_NO_WARNINGS', None),
('_NO_OLDNAMES', None), # for mingw32
('NO_OLDNAMES', None), # for mingw64
('__STDC__', None) # for MSVC
])
if sys.platform.startswith('linux'):
cfg['define_macros'].append(('HAVE_SINCOS', None))
# Squelch a few compilation warnings in WCSLIB
if setup_helpers.get_compiler_option() in ('unix', 'mingw32'):
if not get_distutils_build_option('debug'):
cfg['extra_compile_args'].extend([
'-Wno-strict-prototypes',
'-Wno-unused-function',
'-Wno-unused-value',
'-Wno-uninitialized'])
def get_extensions():
generate_c_docstrings()
######################################################################
# DISTUTILS SETUP
cfg = setup_helpers.DistutilsExtensionArgs()
wcslib_files = [ # List of wcslib files to compile
'flexed/wcsbth.c',
'flexed/wcspih.c',
'flexed/wcsulex.c',
'flexed/wcsutrn.c',
'cel.c',
'dis.c',
'lin.c',
'log.c',
'prj.c',
'spc.c',
'sph.c',
'spx.c',
'tab.c',
'wcs.c',
'wcserr.c',
'wcsfix.c',
'wcshdr.c',
'wcsprintf.c',
'wcsunits.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'),
join(WCSROOT, 'include', 'wcsconfig.h')
]
get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(join(WCSROOT, "include"))
astropy_wcs_files = [ # List of astropy.wcs files to compile
'distortion.c',
'distortion_wrap.c',
'docstrings.c',
'pipeline.c',
'pyutil.c',
'astropy_wcs.c',
'astropy_wcs_api.c',
'sip.c',
'sip_wrap.c',
'str_list_proxy.c',
'unit_list_proxy.c',
'util.c',
'wcslib_wrap.c',
'wcslib_tabprm_wrap.c']
cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension(str('astropy.wcs._wcs'), **cfg)]
def get_package_data():
# Installs the C API header files and the testing extension sources
api_files = [
'astropy_wcs.h',
'astropy_wcs_api.h',
'distortion.h',
'isnan.h',
'pipeline.h',
'pyutil.h',
'sip.h',
'util.h',
'wcsconfig.h',
]
api_files = [join('include', 'astropy_wcs', x) for x in api_files]
api_files.append(join('include', 'astropy_wcs_api.h'))
wcslib_headers = [
'cel.h',
'lin.h',
'prj.h',
'spc.h',
'spx.h',
'tab.h',
'wcs.h',
'wcserr.h',
'wcsmath.h',
'wcsprintf.h',
]
if not setup_helpers.use_system_library('wcslib'):
for header in wcslib_headers:
source = join('cextern', 'wcslib', 'C', header)
dest = join('astropy', 'wcs', 'include', 'wcslib', header)
if newer_group([source], dest, 'newer'):
shutil.copy(source, dest)
api_files.append(join('include', 'wcslib', header))
return {
str('astropy.wcs.tests'): ['extension/*.c'],
str('astropy.wcs'): api_files,
}
def get_external_libraries():
return ['wcslib']
|
848b8a09016c301efe5dc9247014e99b9a8553279632e5fdec50c0dc34d4446f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from .wcs import WCS, WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_CELESTIAL
__doctest_skip__ = ['wcs_to_celestial_frame', 'celestial_frame_to_wcs']
__all__ = ['add_stokes_axis_to_wcs', 'celestial_frame_to_wcs',
'wcs_to_celestial_frame', 'proj_plane_pixel_scales',
'proj_plane_pixel_area', 'is_proj_plane_distorted',
'non_celestial_pixel_scales', 'skycoord_to_pixel',
'pixel_to_skycoord', 'custom_wcs_to_frame_mappings',
'custom_frame_to_wcs_mappings']
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
To add at the end, use ``add_before_ind = wcs.wcs.naxis``.
The beginning is at position 0.
Returns
-------
A new `~astropy.wcs.WCS` instance with an additional axis
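Examples
--------
A minimal sketch, appending a Stokes axis to a bare, default-valued
two-dimensional WCS (hypothetical values)::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import add_stokes_axis_to_wcs
>>> wcs = WCS(naxis=2)
>>> newwcs = add_stokes_axis_to_wcs(wcs, wcs.wcs.naxis)
>>> newwcs.wcs.naxis
3
>>> newwcs.wcs.ctype[2]
'STOKES'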
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = 'STOKES'
newwcs.wcs.cname[add_before_ind] = 'STOKES'
return newwcs
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from astropy.time import Time
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-':
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == 'FK4':
if equinox is not None:
equinox = Time(equinox, format='byear')
frame = FK4(equinox=equinox)
elif radesys == 'FK4-NO-E':
if equinox is not None:
equinox = Time(equinox, format='byear')
frame = FK4NoETerms(equinox=equinox)
elif radesys == 'FK5':
if equinox is not None:
equinox = Time(equinox, format='jyear')
frame = FK5(equinox=equinox)
elif radesys == 'ICRS':
frame = ICRS()
else:
if xcoord == 'GLON' and ycoord == 'GLAT':
frame = Galactic()
elif xcoord == 'TLON' and ycoord == 'TLAT':
frame = ITRS(obstime=wcs.wcs.dateobs or None)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection='TAN'):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import BaseRADecFrame, FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = 'RA--'
ycoord = 'DEC-'
if isinstance(frame, ICRS):
wcs.wcs.radesys = 'ICRS'
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = 'FK4-NO-E'
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = 'FK4'
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = 'FK5'
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = 'GLON'
ycoord = 'GLAT'
elif isinstance(frame, ITRS):
xcoord = 'TLON'
ycoord = 'TLAT'
wcs.wcs.radesys = 'ITRS'
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + '-' + projection, ycoord + '-' + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, '__call__'):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, '__call__'):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
subclass instance that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
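Examples
--------
A minimal sketch for a bare FK5-like WCS (hypothetical header values)::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.radesys = 'FK5'
>>> wcs.wcs.set()
>>> wcs_to_celestial_frame(wcs)
<FK5 Frame (equinox=J2000.000)>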
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError("Could not determine celestial frame corresponding to "
"the specified WCS object")
def celestial_frame_to_wcs(frame, projection='TAN'):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
subclass instance for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError("Could not determine WCS corresponding to the specified "
"coordinate frame.")
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to the
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : `~numpy.ndarray`
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inspecting the value
of the `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
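Examples
--------
A minimal sketch for a diagonal (non-rotated) WCS with hypothetical
pixel increments::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import proj_plane_pixel_scales
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.cdelt = [0.1, 0.2]
>>> [float(scale) for scale in proj_plane_pixel_scales(wcs)]
[0.1, 0.2]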
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to the
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inspecting the value of the `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
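Examples
--------
A minimal sketch for a celestial WCS with hypothetical square pixels
of 0.5 deg on a side::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import proj_plane_pixel_area
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.cdelt = [0.5, 0.5]
>>> wcs.wcs.set()
>>> float(proj_plane_pixel_area(wcs))
0.25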
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
It will return `True` if transformation from image (detector) coordinates
to the focal plane coordinates is non-orthogonal or if WCS contains
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** with the transformation
"image plane"->"focal plane" and **not** with the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to the non-linear
nature of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
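Examples
--------
A minimal sketch; a bare celestial WCS has an identity (orthogonal)
CD matrix and no SIP or lookup-table distortions::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import is_proj_plane_distorted
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.set()
>>> is_proj_plane_distorted(wcs)
False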
"""
cwcs = wcs.celestial
return (not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or
_has_distortion(cwcs))
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if (pixarea == 0.0):
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return (cd_unitary_err < maxerr)
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
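Examples
--------
A minimal sketch for a non-rotated, non-celestial WCS with
hypothetical pixel increments (the result carries degree units)::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import non_celestial_pixel_scales
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.cdelt = [0.1, 0.5]
>>> [float(s.value) for s in non_celestial_pixel_scales(wcs)]
[0.1, 0.5]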
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1-np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd))*u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(getattr(wcs, dist_attr) is not None
for dist_attr in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip'])
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode='all'):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
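Examples
--------
A minimal sketch with hypothetical header values; a coordinate at the
reference point maps back to ``crpix - 1`` for 0-based pixels::
>>> from astropy.coordinates import SkyCoord
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import skycoord_to_pixel
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.crval = [10., 20.]
>>> wcs.wcs.cunit = ['deg', 'deg']
>>> wcs.wcs.set()
>>> coord = SkyCoord(10., 20., unit='deg')
>>> xp, yp = skycoord_to_pixel(coord, wcs, mode='wcs')
>>> round(float(xp)), round(float(yp))
(-1, -1)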
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == 'all':
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == 'wcs':
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode='all', cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : Whatever ``cls`` is (a subclass of `~astropy.coordinates.SkyCoord`)
The celestial coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
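Examples
--------
A minimal sketch with hypothetical header values; the pixel at the
reference point maps to the ``CRVAL`` sky position::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import pixel_to_skycoord
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.crval = [10., 20.]
>>> wcs.wcs.cunit = ['deg', 'deg']
>>> wcs.wcs.set()
>>> coord = pixel_to_skycoord(-1., -1., wcs, mode='wcs')
>>> round(float(coord.ra.deg)), round(float(coord.dec.deg))
(10, 20)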
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == 'all':
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == 'wcs':
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
|
19af0e7595a81c662b20b91a3c50c8d7fd9c3366e59cb30d4d98ec81d62b3da0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Under the hood, there are 3 separate classes that perform different
parts of the transformation:
- `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
functionality in `wcslib`_. (This includes TPV and TPD
polynomial distortion, but not SIP distortion).
- `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
`SIP`_ convention.
- `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
lookup tables.
Additionally, the class `WCS` aggregates all of these transformations
together in a pipeline:
- Detector to image plane correction (by a pair of
`~astropy.wcs.DistortionLookupTable` objects).
- `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
object)
- `distortion paper`_ table-lookup correction (by a pair of
`~astropy.wcs.DistortionLookupTable` objects).
- `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
"""
# STDLIB
import copy
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy import log
from astropy.io import fits
from . import docstrings
from . import _wcs
from astropy.utils.compat import possible_filename
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm',
'WCSBase', 'validate', 'WcsError', 'SingularMatrixError',
'InconsistentAxisTypesError', 'InvalidTransformError',
'InvalidCoordinateError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
NAXIS_DEPRECATE_MESSAGE = """
Private attributes "_naxis1" and "_naxis2" have been deprecated since v3.1.
Instead use the "pixel_shape" property which returns a list of NAXISj keyword values.
"""
if _wcs is not None:
_parsed_version = _wcs.__version__.split('.')
if int(_parsed_version[0]) == 5 and int(_parsed_version[1]) < 8:
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used.".format(_wcs.__version__))
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Tabprm = _wcs.Tabprm
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB', 'WCSHDR', 'WCSHDO')):
locals()[key] = val
__all__.append(key)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore, then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword may optionally end with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
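# For example, it matches 'A_0_2', 'BP_1_1' and 'A_19_0', but not 'PV1_1'.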
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: http://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : astropy.io.fits header object, Primary HDU, Image HDU, string, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : An astropy.io.fits file (hdulist) object, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of flags, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
"""
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False)
except _wcs.NoWcsKeywordsFoundError:
est_naxis = 0
else:
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis
else:
est_naxis = 2
header = fits.Header.fromstring(header_string)
if est_naxis == 0:
est_naxis = 2
self.naxis = est_naxis
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {0} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({0:d}) than the "
"image it is associated with ({1:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
copy.wcs = self.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array[naxis], optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : length 2 sequence ints, optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
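Examples
--------
A minimal sketch with an explicit image shape (hypothetical values)::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.set()
>>> footprint = wcs.calc_footprint(axes=(1024, 1024))
>>> footprint.shape
(4, 2)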
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn("Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header[str('AXISCORR')]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
assert isinstance(fobj, fits.HDUList), ('An astropy.io.fits.HDUList'
' is required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + str('.EXTVER')
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + str('.AXIS.{0:d}').format(i)
if i == header[dp_axis_key]:
d_data = fobj[str('D2IMARR'), d_extver].data
else:
d_data = (fobj[str('D2IMARR'), d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj[str('D2IMARR'), d_extver].header
d_crpix = (d_header.get(str('CRPIX1'), 0.0), d_header.get(str('CRPIX2'), 0.0))
d_crval = (d_header.get(str('CRVAL1'), 0.0), d_header.get(str('CRVAL2'), 0.0))
d_cdelt = (d_header.get(str('CDELT1'), 1.0), d_header.get(str('CDELT2'), 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in list(header):
if key.startswith(dp + str('.')):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn("The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[(str('D2IMARR'), 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[(str('D2IMARR'), 1)].header
naxis = d2im_hdr[str('NAXIS')]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get(str('CRPIX') + str(i), 0.0)
crval[i - 1] = d2im_hdr.get(str('CRVAL') + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get(str('CDELT') + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[str('{0}{1:d}').format(dist, num)] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[str('{0}{1:d}.NAXES').format(d_kw, num)] = (
len(det2im.data.shape), 'Number of independent variables in d2im function')
for i in range(det2im.data.ndim):
hdulist[0].header[str('{0}{1:d}.AXIS.{2:d}').format(d_kw, num, i + 1)] = (
i + 1, 'Axis number of the jth independent variable in a d2im function')
image = fits.ImageHDU(det2im.data, name=str('D2IMARR'))
header = image.header
header[str('CRPIX1')] = (det2im.crpix[0],
'Coordinate system reference pixel')
header[str('CRPIX2')] = (det2im.crpix[1],
'Coordinate system reference pixel')
header[str('CRVAL1')] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header[str('CRVAL2')] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header[str('CDELT1')] = (det2im.cdelt[0],
'Coordinate increment along axis')
header[str('CDELT2')] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = str('DP')
err_kw = str('CPERR')
else:
d_kw = str('DQ')
err_kw = str('CQERR')
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + str('.EXTVER')
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + str('.AXIS.{0:d}'.format(i))
if i == header[dp_axis_key]:
d_data = fobj[str('WCSDVARR'), d_extver].data
else:
d_data = (fobj[str('WCSDVARR'), d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj[str('WCSDVARR'), d_extver].header
d_crpix = (d_header.get(str('CRPIX1'), 0.0),
d_header.get(str('CRPIX2'), 0.0))
d_crval = (d_header.get(str('CRVAL1'), 0.0),
d_header.get(str('CRVAL2'), 0.0))
d_cdelt = (d_header.get(str('CDELT1'), 1.0),
d_header.get(str('CDELT2'), 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in list(header):
if key.startswith(dp + str('.')):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = str('DP')
err_kw = str('CPERR')
else:
d_kw = str('DQ')
err_kw = str('CQERR')
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[str('{0}{1:d}').format(dist, num)] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[str('{0}{1:d}.NAXES').format(d_kw, num)] = (
len(cpdis.data.shape), 'Number of independent variables in distortion function')
for i in range(cpdis.data.ndim):
hdulist[0].header[str('{0}{1:d}.AXIS.{2:d}').format(d_kw, num, i + 1)] = (
i + 1,
'Axis number of the jth independent variable in a distortion function')
image = fits.ImageHDU(cpdis.data, name=str('WCSDVARR'))
header = image.header
header[str('CRPIX1')] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header[str('CRPIX2')] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header[str('CRVAL1')] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header[str('CRVAL2')] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header[str('CDELT1')] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header[str('CDELT2')] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in (m.group() for m in map(SIP_KW.match, list(header))
if m is not None):
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if str("A_ORDER") in header and header[str('A_ORDER')] > 1:
if str("B_ORDER") not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header[str("A_ORDER")])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("A_{0}_{1}").format(i, j)
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header[str("B_ORDER")])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("B_{0}_{1}").format(i, j)
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header[str('A_ORDER')]
del header[str('B_ORDER')]
ctype = [header['CTYPE{0}{1}'.format(nax, wcskey)] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif str("B_ORDER") in header and header[str('B_ORDER')] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if str("AP_ORDER") in header and header[str('AP_ORDER')] > 1:
if str("BP_ORDER") not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header[str("AP_ORDER")])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("AP_{0}_{1}").format(i, j)
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header[str("BP_ORDER")])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("BP_{0}_{1}").format(i, j)
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header[str('AP_ORDER')]
del header[str('BP_ORDER')]
elif str("BP_ORDER") in header and header[str('BP_ORDER')] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if str("CRPIX1{0}".format(wcskey)) not in header or str("CRPIX2{0}".format(wcskey)) not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get("CRPIX1{0}".format(wcskey))
crpix2 = header.get("CRPIX2{0}".format(wcskey))
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
        def write_array(name, a):
            if a is None:
                return
            size = a.shape[0]
            keywords['{0}_ORDER'.format(name)] = size - 1
            for i in range(size):
                for j in range(size - i):
                    if a[i, j] != 0.0:
                        keywords[
                            '{0}_{1:d}_{2:d}'.format(name, i, j)] = a[i, j]
        write_array('A', self.sip.a)
        write_array('B', self.sip.b)
        write_array('AP', self.sip.ap)
        write_array('BP', self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
            if any(x.size == 0 for x in axes):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
            if ra_dec_order and sky == 'output':
                output = self._normalize_sky(output)
                return (output[:, 0].reshape(axes[0].shape),
                        output[:, 1].reshape(axes[0].shape))
            return [output[:, i].reshape(axes[0].shape)
                    for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {0})".format(self.naxis))
if 0 in xy.shape:
return xy
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{0}], origin)".format(self.naxis))
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
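        Examples
        --------
        A minimal sketch, assuming ``w`` is a hypothetical
        two-dimensional `~astropy.wcs.WCS` instance:
        >>> ra, dec = w.wcs_pix2world([1, 2, 3], [1, 1, 1], 1)  # doctest: +SKIP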
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('world coordinates, in degrees', 8))
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
        # (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
        # a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
        # (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
        # `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
        # checking if an iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
        # of the algorithm is about 50% slower than the non-adaptive
        # version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
        # If, in future releases of `~astropy.wcs`, `pix2foc` no
        # longer applies all the required distortion corrections,
        # then the calls to `pix2foc` in the code below will have to
        # be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
                # Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {0:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
        all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
        adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (Default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
            within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (Default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (Default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (Default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
            but there are only a few input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
            shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
            for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (Default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
            the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
            ``Inf`` values in the return results (in addition to
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
        Using the method of fixed-point iterations, we iterate
        starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def to_fits(self, relax=False, key=None):
"""
Generate an `astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `astropy.io.fits.HDUList`
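        Examples
        --------
        A minimal round-trip sketch (``w`` is a hypothetical
        `~astropy.wcs.WCS` instance; the output file name is arbitrary):
        >>> hdulist = w.to_fits(relax=True)   # doctest: +SKIP
        >>> hdulist.writeto('wcs_only.fits')  # doctest: +SKIP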
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {0} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
            if self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if current WCS is already distortion-corrected (eg, drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case of alt key= " "
kw = 'CTYPE{0}{1}'.format(i, self.wcs.alt).strip()
if kw in header:
                # Handle the "-SIP" *suffix* explicitly: str.strip("-SIP")
                # strips any of the characters '-', 'S', 'I', 'P' from both
                # ends and can mangle CTYPE values.
                base = header[kw]
                if base.endswith("-SIP"):
                    base = base[:-len("-SIP")]
                val = base + "-SIP" if add_sip else base
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
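        Examples
        --------
        A minimal usage sketch (``w`` is a hypothetical celestial
        `~astropy.wcs.WCS` instance):
        >>> w.footprint_to_file('footprint.reg', color='red', width=1)  # doctest: +SKIP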
"""
        comments = ('# Region file format: DS9 version 4.0 \n'
                    '# global color=green font="helvetica 12 bold" '
                    'select=1 highlite=1 edit=1 move=1 delete=1 '
                    'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write('{}\n'.format(coordsys))
f.write('polygon(')
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=',')
f.write(') # color={0}, width={1:d} \n'.format(color, width))
@property
def _naxis1(self):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
return self._naxis[0]
@_naxis1.setter
def _naxis1(self, value):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
self._naxis[0] = value
@property
def _naxis2(self):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
return self._naxis[1]
@_naxis2.setter
def _naxis2(self, value):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
self._naxis[1] = value
def _get_naxis(self, header=None):
_naxis = []
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header['NAXIS{}'.format(naxis)])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
        """
        Return a short description. Simply porting the behavior from
        the `printwcs()` method.
        """
description = ["WCS Keywords\n",
"Number of WCS axes: {0!r}".format(self.naxis)]
sfmt = ' : ' + "".join(["{"+"{0}".format(i)+"!r} " for i in range(self.naxis)])
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append('NAXIS : {}'.format(' '.join(map(str, self._naxis))))
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dicts
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
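        Examples
        --------
        A sketch of the returned structure for a hypothetical
        two-dimensional celestial WCS:
        >>> w.get_axis_types()  # doctest: +SKIP
        [{'coordinate_type': 'celestial', 'scale': 'non-linear celestial',
          'group': 0, 'number': 0},
         {'coordinate_type': 'celestial', 'scale': 'non-linear celestial',
          'group': 0, 'number': 1}]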
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
return (__WCS_unpickle__,
(self.__class__, self.__dict__, buffer.getvalue(),))
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
        dropax : int
            The index of the WCS axis to drop, counting from 0 (i.e., python
            convention, not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with one axis fewer
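        Examples
        --------
        A minimal sketch, assuming ``w`` is a hypothetical 3-axis WCS
        with ``ctype == ['RA---TAN', 'DEC--TAN', 'VELO-LSR']``:
        >>> w_celestial = w.dropaxis(2)  # doctest: +SKIP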
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
        ax0 : int
        ax1 : int
            The indices of the WCS axes to be swapped, counting from 0 (i.e.,
            python convention, not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with the same number of axes, but two
swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES])
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` method, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
            A new WCS instance, sliced according to ``view``.
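        Examples
        --------
        A minimal sketch, assuming ``w`` is a hypothetical
        two-dimensional WCS; the view mirrors ``data[::2, 100:200]``:
        >>> w_new = w.slice((slice(None, None, 2), slice(100, 200)))  # doctest: +SKIP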
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{0} attribute is not updated because at "
"least one index ('{1}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError("'{0}' object is not iterable".format(self.__class__.__name__))
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
A list of names along each axis
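        Examples
        --------
        A sketch: for a hypothetical WCS with
        ``ctype == ['RA---TAN', 'DEC--TAN']`` and no ``cname`` set,
        this returns ``['RA', 'DEC']``.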
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL])
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
        return (self.sip is not None or
                self.cpdis1 is not None or self.cpdis2 is not None or
                self.det2im1 is not None or self.det2im2 is not None)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.array(np.dot(cdelt, pc))
return pccd
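    # A minimal illustrative sketch (not part of the original source): with
    # only CDELT set, the default PC matrix is the identity, so the pixel
    # scale matrix reduces to diag(cdelt).
    #
    #     >>> w = WCS(naxis=2)
    #     >>> w.wcs.cdelt = [-0.05, 0.05]
    #     >>> w.pixel_scale_matrix
    #     array([[-0.05,  0.  ],
    #            [ 0.  ,  0.05]])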
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes.
With this method, one can do:
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check if it is within the wcs coordinate.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
self.__dict__.update(dct)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
WCS.__init__(self, hdulist[0].header, hdulist)
return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or astropy.io.fits header object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
keysel : sequence of flags, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS` objects
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
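# A minimal illustrative sketch (not part of the original source): a
# hypothetical header carrying a primary WCS plus an alternate description
# keyed 'A' yields two entries.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header()
#     >>> hdr['CTYPE1'], hdr['CTYPE2'] = 'RA---TAN', 'DEC--TAN'
#     >>> hdr['CTYPE1A'], hdr['CTYPE2A'] = 'RA---SIN', 'DEC--SIN'
#     >>> [w.wcs.alt for w in find_all_wcs(hdr)]
#     [' ', 'A']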
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str path, readable file-like object or `astropy.io.fits.HDUList` object
The FITS file to validate.
Returns
-------
results : WcsValidateResults instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [" WCS key '{0}':".format(self._key or ' ')]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = ' ({0})'.format(self._hdu_name)
else:
hdu_name = ''
result = ['HDU {0}{1}:'.format(self._hdu_index, hdu_name)]
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
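# A minimal illustrative sketch (not part of the original source); the
# filename is hypothetical. Printing the nested result lists produces the
# formatted report described above.
#
#     >>> from astropy import wcs
#     >>> results = wcs.validate('image.fits')
#     >>> print(results)
#     HDU 0:
#      WCS key ' ':
#     ...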
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# It gets to be really tedious to type long docstrings in ANSI C
# syntax (since multi-line string literals are not valid).
# Therefore, the docstrings are written here in doc/docstrings.py,
# which are then converted by setup.py into docstrings.h, which is
# included by pywcs.c
__all__ = ['TWO_OR_MORE_ARGS', 'RETURNS', 'ORIGIN', 'RA_DEC_ORDER']
def _fix(content, indent=0):
lines = content.split('\n')
indent = '\n' + ' ' * indent
return indent.join(lines)
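# A minimal illustrative sketch (not part of the original source) of what
# ``_fix`` does: it re-joins the lines with a newline plus the requested
# number of spaces, so a fragment can be spliced into a docstring at the
# right indentation.
#
#     >>> _fix("a\nb", indent=4)
#     'a\n    b'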
def TWO_OR_MORE_ARGS(naxis, indent=0):
return _fix(
"""args : flexible
There are two accepted forms for the positional arguments:
- 2 arguments: An *N* x *{0}* array of coordinates, and an
*origin*.
- more than 2 arguments: An array for each axis, followed by
an *origin*. These arrays must be broadcastable to one
another.
Here, *origin* is the coordinate in the upper left corner of the
image. In FITS and Fortran standards, this is 1. In Numpy and C
standards this is 0.
""".format(naxis), indent)
def RETURNS(out_type, indent=0):
return _fix("""result : array
Returns the {0}. If the input was a single array and
origin, a single array is returned, otherwise a tuple of arrays is
returned.""".format(out_type), indent)
def ORIGIN(indent=0):
return _fix(
"""
origin : int
Specifies the origin of pixel values. The Fortran and FITS
standards use an origin of 1. Numpy and C use array indexing with
origin at 0.
""", indent)
def RA_DEC_ORDER(indent=0):
return _fix(
"""
ra_dec_order : bool, optional
    When `True`, ensures that world coordinates are always given
    and returned as (*ra*, *dec*) pairs, regardless of the order of
    the axes specified by the ``CTYPE`` keywords. Default is
`False`.
""", indent)
a = """
``double array[a_order+1][a_order+1]`` Focal plane transformation
matrix.
The `SIP`_ ``A_i_j`` matrix used for pixel to focal plane
transformation.
Its values may be changed in place, but it may not be resized, without
creating a new `~astropy.wcs.Sip` object.
"""
a_order = """
``int`` (read-only) Order of the polynomial (``A_ORDER``).
"""
all_pix2world = """
all_pix2world(pixcrd, origin) -> ``double array[ncoord][nelem]``
Transforms pixel coordinates to world coordinates.
Does the following:
- Detector to image plane correction (if present)
- SIP distortion correction (if present)
- FITS WCS distortion correction (if present)
- wcslib "core" WCS transformation
The first three (the distortion corrections) are done in parallel.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{0}
Returns
-------
world : double array[ncoord][nelem]
Returns an array of world coordinates.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(ORIGIN())
alt = """
``str`` Character code for alternate coordinate descriptions.
For example, the ``"a"`` in keyword names such as ``CTYPEia``. This
is a space character for the primary coordinate description, or one of
the 26 upper-case letters, A-Z.
"""
ap = """
``double array[ap_order+1][ap_order+1]`` Focal plane to pixel
transformation matrix.
The `SIP`_ ``AP_i_j`` matrix used for focal plane to pixel
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
ap_order = """
``int`` (read-only) Order of the polynomial (``AP_ORDER``).
"""
axis_types = """
``int array[naxis]`` An array of four-digit type codes for each axis.
- First digit (i.e. 1000s):
- 0: Non-specific coordinate type.
- 1: Stokes coordinate.
- 2: Celestial coordinate (including ``CUBEFACE``).
- 3: Spectral coordinate.
- Second digit (i.e. 100s):
- 0: Linear axis.
- 1: Quantized axis (``STOKES``, ``CUBEFACE``).
- 2: Non-linear celestial axis.
- 3: Non-linear spectral axis.
- 4: Logarithmic axis.
- 5: Tabular axis.
- Third digit (i.e. 10s):
- 0: Group number, e.g. lookup table number
- The fourth digit is used as a qualifier depending on the axis type.
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables: the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
have its type set to -1 and generate an error.
"""
b = """
``double array[b_order+1][b_order+1]`` Pixel to focal plane
transformation matrix.
The `SIP`_ ``B_i_j`` matrix used for pixel to focal plane
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
b_order = """
``int`` (read-only) Order of the polynomial (``B_ORDER``).
"""
bounds_check = """
bounds_check(pix2world, world2pix)
Enable/disable bounds checking.
Parameters
----------
pix2world : bool, optional
When `True`, enable bounds checking for the pixel-to-world (p2x)
transformations. Default is `True`.
world2pix : bool, optional
When `True`, enable bounds checking for the world-to-pixel (s2x)
transformations. Default is `True`.
Notes
-----
Note that by default (without calling `bounds_check`) strict bounds
checking is enabled.
"""
bp = """
``double array[bp_order+1][bp_order+1]`` Focal plane to pixel
transformation matrix.
The `SIP`_ ``BP_i_j`` matrix used for focal plane to pixel
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
bp_order = """
``int`` (read-only) Order of the polynomial (``BP_ORDER``).
"""
cd = """
``double array[naxis][naxis]`` The ``CDi_ja`` linear transformation
matrix.
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
cdelt = """
``double array[naxis]`` Coordinate increments (``CDELTia``) for each
coord axis.
If a ``CDi_ja`` linear transformation matrix is present, a warning is
raised and `~astropy.wcs.Wcsprm.cdelt` is ignored. The ``CDi_ja``
matrix may be deleted by::
del wcs.wcs.cd
An undefined value is represented by NaN.
"""
cdfix = """
cdfix()
Fix erroneously omitted ``CDi_ja`` keywords.
Sets the diagonal element of the ``CDi_ja`` matrix to unity if all
``CDi_ja`` keywords associated with a given axis were omitted.
According to Paper I, if any ``CDi_ja`` keywords at all are given in a
FITS header then those not given default to zero. This results in a
singular matrix with an intersecting row and column of zeros.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
cel_offset = """
``boolean`` Is there an offset?
If `True`, an offset will be applied to ``(x, y)`` to force ``(x, y) =
(0, 0)`` at the fiducial point, (phi_0, theta_0). Default is `False`.
"""
celfix = """
Translates AIPS-convention celestial projection types, ``-NCP`` and
``-GLS``.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
cname = """
``list of strings`` A list of the coordinate axis names, from
``CNAMEia``.
"""
colax = """
``int array[naxis]`` An array recording the column numbers for each
axis in a pixel list.
"""
colnum = """
``int`` Column of FITS binary table associated with this WCS.
Where the coordinate representation is associated with an image-array
column in a FITS binary table, this property may be used to record the
relevant column number.
It should be set to zero for an image header or pixel list.
"""
compare = """
compare(other, cmp=0, tolerance=0.0)
Compare two Wcsprm objects for equality.
Parameters
----------
other : Wcsprm
The other Wcsprm object to compare to.
cmp : int, optional
A bit field controlling the strictness of the comparison. When 0,
(the default), all fields must be identical.
The following constants may be or'ed together to loosen the
comparison.
- ``WCSCOMPARE_ANCILLARY``: Ignores ancillary keywords that don't
change the WCS transformation, such as ``DATE-OBS`` or
``EQUINOX``.
- ``WCSCOMPARE_TILING``: Ignore integral differences in
``CRPIXja``. This is the 'tiling' condition, where two WCSes
cover different regions of the same map projection and align on
the same map grid.
- ``WCSCOMPARE_CRPIX``: Ignore any differences at all in
``CRPIXja``. The two WCSes cover different regions of the same
map projection but may not align on the same grid map.
Overrides ``WCSCOMPARE_TILING``.
tolerance : float, optional
The amount of tolerance required. For example, for a value of
1e-6, all floating-point values in the objects must be equal to
the first 6 decimal places. The default value of 0.0 implies
exact equality.
Returns
-------
equal : bool
"""
convert = """
convert(array)
Perform the unit conversion on the elements of the given *array*,
returning an array of the same shape.
"""
coord = """
``double array[K_M]...[K_2][K_1][M]`` The tabular coordinate array.
Has the dimensions::
(K_M, ... K_2, K_1, M)
(see `~astropy.wcs.Tabprm.K`) i.e. with the `M` dimension
varying fastest so that the `M` elements of a coordinate vector are
stored contiguously in memory.
"""
copy = """
Creates a deep copy of the WCS object.
"""
cpdis1 = """
`~astropy.wcs.DistortionLookupTable`
The pre-linear transformation distortion lookup table, ``CPDIS1``.
"""
cpdis2 = """
`~astropy.wcs.DistortionLookupTable`
The pre-linear transformation distortion lookup table, ``CPDIS2``.
"""
crder = """
``double array[naxis]`` The random error in each coordinate axis,
``CRDERia``.
An undefined value is represented by NaN.
"""
crota = """
``double array[naxis]`` ``CROTAia`` keyvalues for each coordinate
axis.
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
crpix = """
``double array[naxis]`` Coordinate reference pixels (``CRPIXja``) for
each pixel axis.
"""
crval = """
``double array[naxis]`` Coordinate reference values (``CRVALia``) for
each coordinate axis.
"""
crval_tabprm = """
``double array[M]`` Index values for the reference pixel for each of
the tabular coord axes.
"""
csyer = """
``double array[naxis]`` The systematic error in the coordinate value
axes, ``CSYERia``.
An undefined value is represented by NaN.
"""
ctype = """
``list of strings[naxis]`` List of ``CTYPEia`` keyvalues.
The `~astropy.wcs.Wcsprm.ctype` keyword values must be in upper case
and there must be zero or one pair of matched celestial axis types,
and zero or one spectral axis.
"""
cubeface = """
``int`` Index into the ``pixcrd`` (pixel coordinate) array for the
``CUBEFACE`` axis.
This is used for quadcube projections where the cube faces are stored
on a separate axis.
The quadcube projections (``TSC``, ``CSC``, ``QSC``) may be
represented in FITS in either of two ways:
- The six faces may be laid out in one plane and numbered as
follows::
0
4 3 2 1 4 3 2
5
Faces 2, 3 and 4 may appear on one side or the other (or both).
The world-to-pixel routines map faces 2, 3 and 4 to the left but
the pixel-to-world routines accept them on either side.
- The ``COBE`` convention in which the six faces are stored in a
three-dimensional structure using a ``CUBEFACE`` axis indexed
from 0 to 5 as above.
These routines support both methods; `~astropy.wcs.Wcsprm.set`
determines which is being used by the presence or absence of a
``CUBEFACE`` axis in `~astropy.wcs.Wcsprm.ctype`.
`~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` translate the
``CUBEFACE`` axis representation to the single plane representation
understood by the lower-level projection routines.
"""
cunit = """
``list of astropy.UnitBase[naxis]`` List of ``CUNITia`` keyvalues as
`astropy.units.UnitBase` instances.
These define the units of measurement of the ``CRVALia``, ``CDELTia``
and ``CDi_ja`` keywords.
As ``CUNITia`` is an optional header keyword,
`~astropy.wcs.Wcsprm.cunit` may be left blank but otherwise is
expected to contain a standard units specification as defined by WCS
Paper I. `~astropy.wcs.Wcsprm.unitfix` is available to translate
commonly used non-standard units specifications but this must be done
as a separate step before invoking `~astropy.wcs.Wcsprm.set`.
For celestial axes, if `~astropy.wcs.Wcsprm.cunit` is not blank,
`~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale
`~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
`~astropy.wcs.Wcsprm.cd` to decimal degrees. It then resets
`~astropy.wcs.Wcsprm.cunit` to ``"deg"``.
For spectral axes, if `~astropy.wcs.Wcsprm.cunit` is not blank,
`~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale
`~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
`~astropy.wcs.Wcsprm.cd` to SI units. It then resets
`~astropy.wcs.Wcsprm.cunit` accordingly.
`~astropy.wcs.Wcsprm.set` ignores `~astropy.wcs.Wcsprm.cunit` for
other coordinate types; `~astropy.wcs.Wcsprm.cunit` may be used to
label coordinate values.
"""
cylfix = """
cylfix()
Fixes WCS keyvalues for malformed cylindrical projections.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
data = """
``float array`` The array data for the
`~astropy.wcs.DistortionLookupTable`.
"""
data_wtbarr = """
``double array``
The array data for the BINTABLE.
"""
dateavg = """
``string`` Representative mid-point of the date of observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
dateobs = """
``string`` Start of the date of observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateavg
"""
datfix = """
datfix()
Translates the old ``DATE-OBS`` date format to year-2000 standard form
``(yyyy-mm-ddThh:mm:ss)`` and derives ``MJD-OBS`` from it if not
already set.
Alternatively, if `~astropy.wcs.Wcsprm.mjdobs` is set and
`~astropy.wcs.Wcsprm.dateobs` isn't, then `~astropy.wcs.Wcsprm.datfix`
derives `~astropy.wcs.Wcsprm.dateobs` from it. If both are set but
disagree by more than half a day then `ValueError` is raised.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
delta = """
``double array[M]`` (read-only) Interpolated indices into the coord
array.
Array of interpolated indices into the coordinate array such that
Upsilon_m, as defined in Paper III, is equal to
(`~astropy.wcs.Tabprm.p0` [m] + 1) + delta[m].
"""
det2im = """
Convert detector coordinates to image plane coordinates.
"""
det2im1 = """
A `~astropy.wcs.DistortionLookupTable` object for detector to image plane
correction in the *x*-axis.
"""
det2im2 = """
A `~astropy.wcs.DistortionLookupTable` object for detector to image plane
correction in the *y*-axis.
"""
dims = """
``int array[ndim]`` (read-only)
The dimensions of the tabular array
`~astropy.wcs.Wtbarr.data`.
"""
DistortionLookupTable = """
DistortionLookupTable(*table*, *crpix*, *crval*, *cdelt*)
Represents a single lookup table for a `distortion paper`_
transformation.
Parameters
----------
table : 2-dimensional array
The distortion lookup table.
crpix : 2-tuple
The distortion array reference pixel
crval : 2-tuple
The image array pixel coordinate
cdelt : 2-tuple
The grid step size
"""
equinox = """
``double`` The equinox associated with dynamical equatorial or
ecliptic coordinate systems.
``EQUINOXa`` (or ``EPOCH`` in older headers). Not applicable to ICRS
equatorial or ecliptic coordinates.
An undefined value is represented by NaN.
"""
extlev = """
``int`` (read-only)
``EXTLEV`` identifying the binary table extension.
"""
extnam = """
``str`` (read-only)
``EXTNAME`` identifying the binary table extension.
"""
extrema = """
``double array[K_M]...[K_2][2][M]`` (read-only)
An array recording the minimum and maximum value of each element of
the coordinate vector in each row of the coordinate array, with the
dimensions::
(K_M, ... K_2, 2, M)
(see `~astropy.wcs.Tabprm.K`). The minimum is recorded
in the first element of the compressed K_1 dimension, then the
maximum. This array is used by the inverse table lookup function to
speed up table searches.
"""
extver = """
``int`` (read-only)
``EXTVER`` identifying the binary table extension.
"""
find_all_wcs = """
find_all_wcs(header, relax=0, keysel=0)
Find all WCS transformations in the header.
Parameters
----------
header : str
The raw FITS header data.
relax : bool or int
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to accept. See
:ref:`relaxread` for details.
keysel : sequence of flags
Used to restrict the keyword types considered:
- ``WCSHDR_IMGHEAD``: Image header keywords.
- ``WCSHDR_BIMGARR``: Binary table image array.
- ``WCSHDR_PIXLIST``: Pixel list keywords.
If zero, there is no restriction. If -1, `wcspih` is called,
rather than `wcstbh`.
Returns
-------
wcs_list : list of `~astropy.wcs.Wcsprm` objects
"""
fix = """
fix(translate_units='', naxis=0)
Applies all of the corrections handled separately by
`~astropy.wcs.Wcsprm.datfix`, `~astropy.wcs.Wcsprm.unitfix`,
`~astropy.wcs.Wcsprm.celfix`, `~astropy.wcs.Wcsprm.spcfix`,
`~astropy.wcs.Wcsprm.cylfix` and `~astropy.wcs.Wcsprm.cdfix`.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of non-standard unit
strings to perform. By default, performs all.
Although ``"S"`` is commonly used to represent seconds, its
translation to ``"s"`` is potentially unsafe since the standard
recognizes ``"S"`` formally as Siemens, however rarely that may be
used. The same applies to ``"H"`` for hours (Henry), and ``"D"``
for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to ``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to ``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to ``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'``
does all of them.
naxis : int array[naxis], optional
Image axis lengths. If this array is set to zero or ``None``,
then `~astropy.wcs.Wcsprm.cylfix` will not be invoked.
Returns
-------
status : dict
Returns a dictionary containing the following keys, each referring
to a status string for each of the sub-fix functions that were
called:
- `~astropy.wcs.Wcsprm.cdfix`
- `~astropy.wcs.Wcsprm.datfix`
- `~astropy.wcs.Wcsprm.unitfix`
- `~astropy.wcs.Wcsprm.celfix`
- `~astropy.wcs.Wcsprm.spcfix`
- `~astropy.wcs.Wcsprm.cylfix`
"""
get_offset = """
get_offset(x, y) -> (x, y)
Returns the offset as defined in the distortion lookup table.
Returns
-------
coordinate : coordinate pair
The offset from the distortion table for pixel point (*x*, *y*).
"""
get_cdelt = """
get_cdelt() -> double array[naxis]
Coordinate increments (``CDELTia``) for each coord axis.
Returns the ``CDELT`` offsets in read-only form. Unlike the
`~astropy.wcs.Wcsprm.cdelt` property, this works even when the header
specifies the linear transformation matrix in one of the alternative
``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access
to the linear transformation matrix, but don't care how it was
specified in the header.
"""
get_pc = """
get_pc() -> double array[naxis][naxis]
Returns the ``PC`` matrix in read-only form. Unlike the
`~astropy.wcs.Wcsprm.pc` property, this works even when the header
specifies the linear transformation matrix in one of the alternative
``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access
to the linear transformation matrix, but don't care how it was
specified in the header.
"""
get_ps = """
get_ps() -> list of tuples
Returns ``PSi_ma`` keywords for each *i* and *m*.
Returns
-------
ps : list of tuples
Returned as a list of tuples of the form (*i*, *m*, *value*):
- *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.set_ps : Set ``PSi_ma`` values
"""
get_pv = """
get_pv() -> list of tuples
Returns ``PVi_ma`` keywords for each *i* and *m*.
Returns
-------
pv : list of tuples
    Returned as a list of tuples of the form (*i*, *m*, *value*):
- *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.set_pv : Set ``PVi_ma`` values
Notes
-----
Note that, if they were not given, `~astropy.wcs.Wcsprm.set` resets
the entries for ``PVi_1a``, ``PVi_2a``, ``PVi_3a``, and ``PVi_4a`` for
longitude axis *i* to match (``phi_0``, ``theta_0``), the native
longitude and latitude of the reference point given by ``LONPOLEa``
and ``LATPOLEa``.
"""
has_cd = """
has_cd() -> bool
Returns `True` if ``CDi_ja`` is present.
``CDi_ja`` is an alternate specification of the linear transformation
matrix, maintained for historical compatibility.
Matrix elements in the IRAF convention are equivalent to the product
``CDi_ja = CDELTia * PCi_ja``, but the defaults differ from that of
the ``PCi_ja`` matrix. If one or more ``CDi_ja`` keywords are present
then all unspecified ``CDi_ja`` default to zero. If no ``CDi_ja`` (or
``CROTAia``) keywords are present, then the header is assumed to be in
``PCi_ja`` form whether or not any ``PCi_ja`` keywords are present
since this results in an interpretation of ``CDELTia`` consistent with
the original FITS specification.
While ``CDi_ja`` may not formally co-exist with ``PCi_ja``, it may
co-exist with ``CDELTia`` and ``CROTAia`` which are to be ignored.
See also
--------
astropy.wcs.Wcsprm.cd : Get the raw ``CDi_ja`` values.
"""
has_cdi_ja = """
has_cdi_ja() -> bool
Alias for `~astropy.wcs.Wcsprm.has_cd`. Maintained for backward
compatibility.
"""
has_crota = """
has_crota() -> bool
Returns `True` if ``CROTAia`` is present.
``CROTAia`` is an alternate specification of the linear transformation
matrix, maintained for historical compatibility.
In the AIPS convention, ``CROTAia`` may only be associated with the
latitude axis of a celestial axis pair. It specifies a rotation in
the image plane that is applied *after* the ``CDELTia``; any other
``CROTAia`` keywords are ignored.
``CROTAia`` may not formally co-exist with ``PCi_ja``. ``CROTAia`` and
``CDELTia`` may formally co-exist with ``CDi_ja`` but if so are to be
ignored.
See also
--------
astropy.wcs.Wcsprm.crota : Get the raw ``CROTAia`` values
"""
has_crotaia = """
has_crotaia() -> bool
Alias for `~astropy.wcs.Wcsprm.has_crota`. Maintained for backward
compatibility.
"""
has_pc = """
has_pc() -> bool
Returns `True` if ``PCi_ja`` is present. ``PCi_ja`` is the
recommended way to specify the linear transformation matrix.
See also
--------
astropy.wcs.Wcsprm.pc : Get the raw ``PCi_ja`` values
"""
has_pci_ja = """
has_pci_ja() -> bool
Alias for `~astropy.wcs.Wcsprm.has_pc`. Maintained for backward
compatibility.
"""
i = """
``int`` (read-only)
Image axis number.
"""
imgpix_matrix = """
``double array[2][2]`` (read-only) Inverse of the ``CDELT`` or ``PC``
matrix.
The inverse of the product of the ``CDELTia`` diagonal matrix and
the ``PCi_ja`` matrix.
"""
is_unity = """
is_unity() -> bool
Returns `True` if the linear transformation matrix
(`~astropy.wcs.Wcsprm.cd`) is unity.
"""
K = """
``int array[M]`` (read-only) The lengths of the axes of the coordinate
array.
An array of length `M` whose elements record the lengths of the axes of
the coordinate array and of each indexing vector.
"""
kind = """
``str`` (read-only)
Character identifying the wcstab array type:
- ``'c'``: coordinate array,
- ``'i'``: index vector.
"""
lat = """
``int`` (read-only) The index into the world coord array containing
latitude values.
"""
latpole = """
``double`` The native latitude of the celestial pole, ``LATPOLEa`` (deg).
"""
lattyp = """
``string`` (read-only) Celestial axis type for latitude.
For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--",
"DEC-", "GLON", "GLAT", etc. in the first four characters of
``CTYPEia`` but with trailing dashes removed.
"""
lng = """
``int`` (read-only) The index into the world coord array containing
longitude values.
"""
lngtyp = """
``string`` (read-only) Celestial axis type for longitude.
For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--",
"DEC-", "GLON", "GLAT", etc. in the first four characters of
``CTYPEia`` but with trailing dashes removed.
"""
lonpole = """
``double`` The native longitude of the celestial pole.
``LONPOLEa`` (deg).
"""
M = """
``int`` (read-only) Number of tabular coordinate axes.
"""
m = """
``int`` (read-only)
Array axis number for index vectors.
"""
map = """
``int array[M]`` Association between axes.
A vector of length `~astropy.wcs.Tabprm.M` that defines
the association between axis *m* in the *M*-dimensional coordinate
array (1 <= *m* <= *M*) and the indices of the intermediate world
coordinate and world coordinate arrays.
When the intermediate and world coordinate arrays contain the full
complement of coordinate elements in image-order, as will usually be
the case, then ``map[m-1] == i-1`` for axis *i* in the *N*-dimensional
image (1 <= *i* <= *N*). In terms of the FITS keywords::
map[PVi_3a - 1] == i - 1.
However, a different association may result if the intermediate
coordinates, for example, only contains a (relevant) subset of
intermediate world coordinate elements. For example, if *M* == 1 for
an image with *N* > 1, it is possible to fill the intermediate
coordinates with the relevant coordinate element with ``nelem`` set to
1. In this case ``map[0] = 0`` regardless of the value of *i*.
"""
mix = """
mix(mixpix, mixcel, vspan, vstep, viter, world, pixcrd, origin)
Given either the celestial longitude or latitude plus an element of
the pixel coordinate, solves for the remaining elements by iterating
on the unknown celestial coordinate element using
`~astropy.wcs.Wcsprm.s2p`.
Parameters
----------
mixpix : int
Which element on the pixel coordinate is given.
mixcel : int
Which element of the celestial coordinate is given. If *mixcel* =
``1``, celestial longitude is given in ``world[self.lng]``,
latitude returned in ``world[self.lat]``. If *mixcel* = ``2``,
celestial latitude is given in ``world[self.lat]``, longitude
returned in ``world[self.lng]``.
vspan : pair of floats
Solution interval for the celestial coordinate, in degrees. The
ordering of the two limits is irrelevant. Longitude ranges may be
specified with any convenient normalization, for example
``(-120,+120)`` is the same as ``(240,480)``, except that the
solution will be returned with the same normalization, i.e. lie
within the interval specified.
vstep : float
Step size for solution search, in degrees. If ``0``, a sensible,
although perhaps non-optimal default will be used.
viter : int
If a solution is not found then the step size will be halved and
the search recommenced. *viter* controls how many times the step
size is halved. The allowed range is 5 - 10.
world : double array[naxis]
World coordinate elements. ``world[self.lng]`` and
``world[self.lat]`` are the celestial longitude and latitude, in
degrees. Which is given and which returned depends on the value
of *mixcel*. All other elements are given. The results will be
written to this array in-place.
pixcrd : double array[naxis].
Pixel coordinates. The element indicated by *mixpix* is given and
the remaining elements will be written in-place.
{0}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *phi* (double array[naxis])
- *theta* (double array[naxis])
- Longitude and latitude in the native coordinate system of
the projection, in degrees.
- *imgcrd* (double array[naxis])
- Image coordinate elements. ``imgcrd[self.lng]`` and
``imgcrd[self.lat]`` are the projected *x*- and
*y*-coordinates, in decimal degrees.
- *world* (double array[naxis])
- Another reference to the *world* argument passed in.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
InvalidCoordinateError
Invalid world coordinate.
NoSolutionError
No solution found in the specified interval.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Get the axes numbers for latitude and longitude
Notes
-----
Initially, the specified solution interval is checked to see if it's a
\"crossing\" interval. If it isn't, a search is made for a crossing
solution by iterating on the unknown celestial coordinate starting at
the upper limit of the solution interval and decrementing by the
specified step size. A crossing is indicated if the trial value of
the pixel coordinate steps through the value specified. If a crossing
interval is found then the solution is determined by a modified form
of \"regula falsi\" division of the crossing interval. If no crossing
interval was found within the specified solution interval then a
search is made for a \"non-crossing\" solution as may arise from a
point of tangency. The process is complicated by having to make
allowance for the discontinuities that occur in all map projections.
Once one solution has been determined others may be found by
subsequent invocations of `~astropy.wcs.Wcsprm.mix` with suitably
restricted solution intervals.
Note the circumstance that arises when the solution point lies at a
native pole of a projection in which the pole is represented as a
finite curve, for example the zenithals and conics. In such cases two
or more valid solutions may exist but `~astropy.wcs.Wcsprm.mix` only
ever returns one.
Because of its generality, `~astropy.wcs.Wcsprm.mix` is very
compute-intensive. For compute-limited applications, more efficient
special-case solvers could be written for simple projections, for
example non-oblique cylindrical projections.
""".format(ORIGIN())
mjdavg = """
``double`` Modified Julian Date corresponding to ``DATE-AVG``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdobs
"""
mjdobs = """
``double`` Modified Julian Date corresponding to ``DATE-OBS``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdavg
"""
name = """
``string`` The name given to the coordinate representation
``WCSNAMEa``.
"""
naxis = """
``int`` (read-only) The number of axes (pixel and coordinate).
Given by the ``NAXIS`` or ``WCSAXESa`` keyvalues.
The number of coordinate axes is determined at parsing time, and can
not be subsequently changed.
It is determined from the highest of the following:
1. ``NAXIS``
2. ``WCSAXESa``
3. The highest axis number in any parameterized WCS keyword. The
keyvalue, as well as the keyword, must be syntactically valid
otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header only
contains auxiliary WCS keywords for a particular coordinate
representation, then no coordinate description is constructed for it.
This value may differ for different coordinate representations of the
same image.
"""
nc = """
``int`` (read-only) Total number of coord vectors in the coord array.
Total number of coordinate vectors in the coordinate array being the
product K_1 * K_2 * ... * K_M.
"""
ndim = """
``int`` (read-only)
Expected dimensionality of the wcstab array.
"""
obsgeo = """
``double array[3]`` Location of the observer in a standard terrestrial
reference frame.
``OBSGEO-X``, ``OBSGEO-Y``, ``OBSGEO-Z`` (in meters).
An undefined value is represented by NaN.
"""
p0 = """
``int array[M]`` Interpolated indices into the coordinate array.
Vector of length `~astropy.wcs.Tabprm.M` of interpolated
indices into the coordinate array such that Upsilon_m, as defined in
Paper III, is equal to ``(p0[m] + 1) + delta[m]``.
"""
p2s = """
p2s(pixcrd, origin)
Converts pixel to world coordinates.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{0}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *imgcrd*: double array[ncoord][nelem]
- Array of intermediate world coordinates. For celestial axes,
``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the
projected *x*-, and *y*-coordinates, in pseudo degrees. For
spectral axes, ``imgcrd[][self.spec]`` is the intermediate
spectral coordinate, in SI units.
- *phi*: double array[ncoord]
- *theta*: double array[ncoord]
- Longitude and latitude in the native coordinate system of the
projection, in degrees.
- *world*: double array[ncoord][nelem]
- Array of world coordinates. For celestial axes,
``world[][self.lng]`` and ``world[][self.lat]`` are the
celestial longitude and latitude, in degrees. For spectral
axes, ``world[][self.spec]`` is the intermediate spectral
coordinate, in SI units.
- *stat*: int array[ncoord]
- Status return value for each coordinate. ``0`` for success,
``1+`` for invalid pixel coordinate.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
*x*- and *y*-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Definition of the latitude and longitude axes
""".format(ORIGIN())
p4_pix2foc = """
p4_pix2foc(*pixcrd, origin*) -> double array[ncoord][nelem]
Convert pixel coordinates to focal plane coordinates using `distortion
paper`_ lookup-table correction.
Parameters
----------
pixcrd : double array[ncoord][nelem].
Array of pixel coordinates.
{0}
Returns
-------
foccrd : double array[ncoord][nelem]
Returns an array of focal plane coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
pc = """
``double array[naxis][naxis]`` The ``PCi_ja`` (pixel coordinate)
transformation matrix.
The order is::
[[PC1_1, PC1_2],
[PC2_1, PC2_2]]
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
phi0 = """
``double`` The native longitude of the fiducial point.
The point whose celestial coordinates are given in ``ref[1:2]``. If
undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`,
will set this to a projection-specific default.
See also
--------
astropy.wcs.Wcsprm.theta0
"""
pix2foc = """
pix2foc(*pixcrd, origin*) -> double array[ncoord][nelem]
Perform both `SIP`_ polynomial and `distortion paper`_ lookup-table
correction in parallel.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{0}
Returns
-------
foccrd : double array[ncoord][nelem]
Returns an array of focal plane coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
piximg_matrix = """
``double array[2][2]`` (read-only) Matrix containing the product of
the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix.
"""
print_contents = """
print_contents()
Print the contents of the `~astropy.wcs.Wcsprm` object to stdout.
Probably only useful for debugging purposes, and may be removed in the
future.
To get a string of the contents, use `repr`.
"""
print_contents_tabprm = """
print_contents()
Print the contents of the `~astropy.wcs.Tabprm` object to
stdout. Probably only useful for debugging purposes, and may be
removed in the future.
To get a string of the contents, use `repr`.
"""
radesys = """
``string`` The equatorial or ecliptic coordinate system type,
``RADESYSa``.
"""
restfrq = """
``double`` Rest frequency (Hz) from ``RESTFRQa``.
An undefined value is represented by NaN.
"""
restwav = """
``double`` Rest wavelength (m) from ``RESTWAVa``.
An undefined value is represented by NaN.
"""
row = """
``int`` (read-only)
Table row number.
"""
s2p = """
s2p(world, origin)
Transforms world coordinates to pixel coordinates.
Parameters
----------
world : double array[ncoord][nelem]
Array of world coordinates, in decimal degrees.
{0}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *phi*: double array[ncoord]
- *theta*: double array[ncoord]
- Longitude and latitude in the native coordinate system of
the projection, in degrees.
- *imgcrd*: double array[ncoord][nelem]
- Array of intermediate world coordinates. For celestial axes,
``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the
projected *x*-, and *y*-coordinates, in pseudo \"degrees\".
For quadcube projections with a ``CUBEFACE`` axis, the face
number is also returned in ``imgcrd[][self.cubeface]``. For
spectral axes, ``imgcrd[][self.spec]`` is the intermediate
spectral coordinate, in SI units.
- *pixcrd*: double array[ncoord][nelem]
- Array of pixel coordinates. Pixel coordinates are
zero-based.
- *stat*: int array[ncoord]
- Status return value for each coordinate. ``0`` for success,
``1+`` for invalid pixel coordinate.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Definition of the latitude and longitude axes
""".format(ORIGIN())
sense = """
``int array[M]`` +1 if monotonically increasing, -1 if decreasing.
A vector of length `~astropy.wcs.Tabprm.M` whose elements
indicate whether the corresponding indexing vector is monotonically
increasing (+1), or decreasing (-1).
"""
set = """
set()
Sets up a WCS object for use according to information supplied within
it.
Note that this routine need not be called directly; it will be invoked
by `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` if
necessary.
Some attributes that are based on other attributes (such as
`~astropy.wcs.Wcsprm.lattyp` on `~astropy.wcs.Wcsprm.ctype`) may not
be correct until after `~astropy.wcs.Wcsprm.set` is called.
`~astropy.wcs.Wcsprm.set` strips off trailing blanks in all string
members.
`~astropy.wcs.Wcsprm.set` recognizes the ``NCP`` projection and
converts it to the equivalent ``SIN`` projection and it also
recognizes ``GLS`` as a synonym for ``SFL``. It does alias
translation for the AIPS spectral types (``FREQ-LSR``, ``FELO-HEL``,
etc.) but without changing the input header keywords.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
set_tabprm = """
set()
Allocates memory for work arrays.
Also sets up the class according to information supplied within it.
Note that this routine need not be called directly; it will be invoked
by functions that need it.
Raises
------
MemoryError
Memory allocation failed.
InvalidTabularParameters
Invalid tabular parameters.
"""
set_ps = """
set_ps(ps)
Sets ``PSi_ma`` keywords for each *i* and *m*.
Parameters
----------
ps : sequence of tuples
The input must be a sequence of tuples of the form (*i*, *m*,
*value*):
- *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.get_ps
"""
set_pv = """
set_pv(pv)
Sets ``PVi_ma`` keywords for each *i* and *m*.
Parameters
----------
pv : list of tuples
The input must be a sequence of tuples of the form (*i*, *m*,
*value*):
- *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative)
- *value*: float. Parameter value.
See also
--------
astropy.wcs.Wcsprm.get_pv
"""
sip = """
Get/set the `~astropy.wcs.Sip` object for performing `SIP`_ distortion
correction.
"""
Sip = """
Sip(*a, b, ap, bp, crpix*)
The `~astropy.wcs.Sip` class performs polynomial distortion correction
using the `SIP`_ convention in both directions.
Parameters
----------
a : double array[m+1][m+1]
The ``A_i_j`` polynomial for pixel to focal plane transformation.
Its size must be (*m* + 1, *m* + 1) where *m* = ``A_ORDER``.
b : double array[m+1][m+1]
The ``B_i_j`` polynomial for pixel to focal plane transformation.
Its size must be (*m* + 1, *m* + 1) where *m* = ``B_ORDER``.
ap : double array[m+1][m+1]
    The ``AP_i_j`` polynomial for focal plane to pixel transformation.
    Its size must be (*m* + 1, *m* + 1) where *m* = ``AP_ORDER``.
bp : double array[m+1][m+1]
    The ``BP_i_j`` polynomial for focal plane to pixel transformation.
    Its size must be (*m* + 1, *m* + 1) where *m* = ``BP_ORDER``.
crpix : double array[2]
The reference pixel.
Notes
-----
Shupe, D. L., M. Moshir, J. Li, D. Makovoz and R. Narron. 2005.
"The SIP Convention for Representing Distortion in FITS Image
Headers." ADASS XIV.
"""
sip_foc2pix = """
sip_foc2pix(*foccrd, origin*) -> double array[ncoord][nelem]
Convert focal plane coordinates to pixel coordinates using the `SIP`_
polynomial distortion convention.
Parameters
----------
foccrd : double array[ncoord][nelem]
Array of focal plane coordinates.
{0}
Returns
-------
pixcrd : double array[ncoord][nelem]
Returns an array of pixel coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
sip_pix2foc = """
sip_pix2foc(*pixcrd, origin*) -> double array[ncoord][nelem]
Convert pixel coordinates to focal plane coordinates using the `SIP`_
polynomial distortion convention.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{0}
Returns
-------
foccrd : double array[ncoord][nelem]
Returns an array of focal plane coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
spcfix = """
spcfix() -> int
Translates AIPS-convention spectral coordinate types. {``FREQ``,
``VELO``, ``FELO``}-{``OBS``, ``HEL``, ``LSR``} (e.g. ``FREQ-LSR``,
``VELO-OBS``, ``FELO-HEL``)
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
spec = """
``int`` (read-only) The index containing the spectral axis values.
"""
specsys = """
``string`` Spectral reference frame (standard of rest), ``SPECSYSa``.
See also
--------
astropy.wcs.Wcsprm.ssysobs, astropy.wcs.Wcsprm.velosys
"""
sptr = """
sptr(ctype, i=-1)
Translates the spectral axis in a WCS object.
For example, a ``FREQ`` axis may be translated into ``ZOPT-F2W`` and
vice versa.
Parameters
----------
ctype : str
Required spectral ``CTYPEia``, maximum of 8 characters. The first
four characters are required to be given and are never modified.
The remaining four, the algorithm code, are completely determined
by, and must be consistent with, the first four characters.
Wildcarding may be used, i.e. if the final three characters are
specified as ``\"???\"``, or if just the eighth character is
specified as ``\"?\"``, the correct algorithm code will be
substituted and returned.
i : int
Index of the spectral axis (0-relative). If ``i < 0`` (or not
provided), it will be set to the first spectral axis identified
from the ``CTYPE`` keyvalues in the FITS header.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
InvalidSubimageSpecificationError
Invalid subimage specification (no spectral axis).
"""
ssysobs = """
``string`` Spectral reference frame.
The spectral reference frame in which there is no differential
variation in the spectral coordinate across the field-of-view,
``SSYSOBSa``.
See also
--------
astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.velosys
"""
ssyssrc = """
``string`` Spectral reference frame for redshift.
The spectral reference frame (standard of rest) in which the redshift
was measured, ``SSYSSRCa``.
"""
sub = """
sub(axes)
Extracts the coordinate description for a subimage from a
`~astropy.wcs.WCS` object.
The world coordinate system of the subimage must be separable in the
sense that the world coordinates at any point in the subimage must
depend only on the pixel coordinates of the axes extracted. In
practice, this means that the ``PCi_ja`` matrix of the original image
must not contain non-zero off-diagonal terms that associate any of the
subimage axes with any of the non-subimage axes.
`sub` can also add axes to a wcsprm object. The new axes will be
created using the defaults set by the Wcsprm constructor which produce
a simple, unnamed, linear axis with world coordinates equal to the
pixel coordinate. These default values can be changed before
invoking `set`.
Parameters
----------
axes : int or a sequence.
- If an int, include the first *N* axes in their original order.
- If a sequence, may contain a combination of image axis numbers
(1-relative) or special axis identifiers (see below). Order is
significant; ``axes[0]`` is the axis number of the input image
that corresponds to the first axis in the subimage, etc. Use an
axis number of 0 to create a new axis using the defaults.
- If ``0``, ``[]`` or ``None``, do a deep copy.
Coordinate axes types may be specified using either strings or
special integer constants. The available types are:
- ``'longitude'`` / ``WCSSUB_LONGITUDE``: Celestial longitude
- ``'latitude'`` / ``WCSSUB_LATITUDE``: Celestial latitude
- ``'cubeface'`` / ``WCSSUB_CUBEFACE``: Quadcube ``CUBEFACE`` axis
- ``'spectral'`` / ``WCSSUB_SPECTRAL``: Spectral axis
- ``'stokes'`` / ``WCSSUB_STOKES``: Stokes axis
- ``'celestial'`` / ``WCSSUB_CELESTIAL``: An alias for the
combination of ``'longitude'``, ``'latitude'`` and ``'cubeface'``.
Returns
-------
new_wcs : `~astropy.wcs.WCS` object
Raises
------
MemoryError
Memory allocation failed.
InvalidSubimageSpecificationError
Invalid subimage specification (no spectral axis).
NonseparableSubimageCoordinateSystem
Non-separable subimage coordinate system.
Notes
-----
Combinations of subimage axes of particular types may be extracted in
the same order as they occur in the input image by combining the
integer constants with the 'binary or' (``|``) operator. For
example::
wcs.sub([WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_SPECTRAL])
would extract the longitude, latitude, and spectral axes in the same
order as the input image. If one of each were present, the resulting
object would have three dimensions.
For convenience, ``WCSSUB_CELESTIAL`` is defined as the combination
``WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_CUBEFACE``.
The codes may also be negated to extract all but the types specified,
for example::
wcs.sub([
WCSSUB_LONGITUDE,
WCSSUB_LATITUDE,
WCSSUB_CUBEFACE,
-(WCSSUB_SPECTRAL | WCSSUB_STOKES)])
The last of these specifies all axis types other than spectral or
Stokes. Extraction is done in the order specified by ``axes``, i.e. a
longitude axis (if present) would be extracted first (via ``axes[0]``)
and not subsequently (via ``axes[3]``). Likewise for the latitude and
cubeface axes in this example.
The number of dimensions in the returned object may be less than or
greater than the length of ``axes``. However, it will never exceed the
number of axes in the input image.
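For example, a usage sketch (assuming ``w`` is a 3-axis
`~astropy.wcs.Wcsprm` with two celestial axes and one spectral axis)::
    celestial = w.sub(['celestial'])   # longitude, latitude (and cubeface)
    spectral = w.sub(['spectral'])     # just the spectral axis
    deep_copy = w.sub(None)            # `None` requests a deep copy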
"""
tab = """
``list of Tabprm`` Tabular coordinate objects.
A list of tabular coordinate objects associated with this WCS.
"""
Tabprm = """
A class to store the information related to tabular coordinates,
i.e., coordinates that are defined via a lookup table.
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.tab`.
"""
theta0 = """
``double`` The native longitude of the fiducial point.
The point whose celestial coordinates are given in ``ref[1:2]``. If
undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`,
will set this to a projection-specific default.
See also
--------
astropy.wcs.Wcsprm.phi0
"""
to_header = """
to_header(relax=False)
`to_header` translates a WCS object into a FITS header.
The details of the header depends on context:
- If the `~astropy.wcs.Wcsprm.colnum` member is non-zero then a
binary table image array header will be produced.
- Otherwise, if the `~astropy.wcs.Wcsprm.colax` member is set
non-zero then a pixel list header will be produced.
- Otherwise, a primary image or image extension header will be
produced.
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required keywords
such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will be
translated to standard (this is partially dependent on whether
``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and only if
they differ from the unit matrix. Thus, if the matrix is unity
then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`~astropy.wcs.Wcsprm.to_header` tries hard to write meaningful
comments.
8. Keyword order may be changed.
Keywords can be translated between the image array, binary table, and
pixel lists forms by manipulating the `~astropy.wcs.Wcsprm.colnum` or
`~astropy.wcs.Wcsprm.colax` members of the `~astropy.wcs.WCS`
object.
Parameters
----------
relax : bool or int
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to write.
See :ref:`relaxwrite` for details.
Returns
-------
header : str
Raw FITS header as a string.
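For example (a sketch, assuming ``w`` is an initialized
`~astropy.wcs.Wcsprm` instance)::
    header_str = w.to_header(relax=True)   # raw FITS cards as one string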
"""
ttype = """
``str`` (read-only)
``TTYPEn`` identifying the column of the binary table that contains
the wcstab array.
"""
unitfix = """
unitfix(translate_units='')
Translates non-standard ``CUNITia`` keyvalues.
For example, ``DEG`` -> ``deg``, also stripping off unnecessary
whitespace.
Parameters
----------
translate_units : str, optional
Do potentially unsafe translations of non-standard unit strings.
Although ``\"S\"`` is commonly used to represent seconds, its
recognizes ``\"S\"`` formally as Siemens, however rarely that may
be translation to ``\"s\"`` is potentially unsafe since the
standard used. The same applies to ``\"H\"`` for hours (Henry),
and ``\"D\"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``\"s\"``, translate ``\"S\"`` to ``\"s\"``.
- If the string contains ``\"h\"``, translate ``\"H\"`` to ``\"h\"``.
- If the string contains ``\"d\"``, translate ``\"D\"`` to ``\"d\"``.
Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'``
does all of them.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
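For example (a sketch, assuming ``w`` is a `~astropy.wcs.Wcsprm`
parsed from a header with non-standard unit strings)::
    status = w.unitfix('shd')   # e.g. 'DEG' -> 'deg', plus the unsafe
                                # 'S' -> 's', 'H' -> 'h', 'D' -> 'd'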
"""
velangl = """
``double`` Velocity angle.
The angle in degrees that should be used to decompose an observed
velocity into radial and transverse components.
An undefined value is represented by NaN.
"""
velosys = """
``double`` Relative radial velocity.
The relative radial velocity (m/s) between the observer and the
selected standard of rest in the direction of the celestial reference
coordinate, ``VELOSYSa``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.ssysobs
"""
velref = """
``int`` AIPS velocity code.
From ``VELREF`` keyword.
"""
wcs = """
A `~astropy.wcs.Wcsprm` object to perform the basic `wcslib`_ WCS
transformation.
"""
Wcs = """
Wcs(*sip, cpdis, wcsprm, det2im*)
Wcs objects amalgamate basic WCS (as provided by `wcslib`_), with
`SIP`_ and `distortion paper`_ operations.
To perform all distortion corrections and WCS transformation, use
``all_pix2world``.
Parameters
----------
sip : `~astropy.wcs.Sip` object or `None`
cpdis : A pair of `~astropy.wcs.DistortionLookupTable` objects, or
``(None, None)``.
wcsprm : `~astropy.wcs.Wcsprm` object
det2im : A pair of `~astropy.wcs.DistortionLookupTable` objects, or
``(None, None)``.
"""
Wcsprm = """
Wcsprm(header=None, key=' ', relax=False, naxis=2, keysel=0, colsel=None)
`~astropy.wcs.Wcsprm` performs the core WCS transformations.
.. note::
The members of this object correspond roughly to the key/value
pairs in the FITS header. However, they are adjusted and
normalized in a number of ways that make performing the WCS
transformation easier. Therefore, they can not be relied upon to
get the original values in the header. For that, use
`astropy.io.fits.Header` directly.
The FITS header parsing enforces correct FITS "keyword = value" syntax
with regard to the equals sign occurring in columns 9 and 10.
However, it does recognize free-format character (NOST 100-2.0,
Sect. 5.2.1), integer (Sect. 5.2.3), and floating-point values
(Sect. 5.2.4) for all keywords.
Parameters
----------
header : An `astropy.io.fits.Header`, string, or `None`.
If ``None``, the object will be initialized to default values.
key : str, optional
The key referring to a particular WCS transform in the header.
This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to
the ``\"a\"`` part of ``\"CTYPEia\"``. (*key* may only be
provided if *header* is also provided.)
relax : bool or int, optional
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to accept. See
:ref:`relaxread` for details.
naxis : int, optional
The number of world coordinates axes for the object. (*naxis* may
only be provided if *header* is `None`.)
keysel : sequence of flag bits, optional
Vector of flag bits that may be used to restrict the keyword types
considered:
- ``WCSHDR_IMGHEAD``: Image header keywords.
- ``WCSHDR_BIMGARR``: Binary table image array.
- ``WCSHDR_PIXLIST``: Pixel list keywords.
If zero, there is no restriction. If -1, the underlying wcslib
function ``wcspih()`` is called, rather than ``wcstbh()``.
colsel : sequence of int
A sequence of table column numbers used to restrict the keywords
considered. `None` indicates no restriction.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
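For example, a minimal construction sketch (recall that *naxis* may
only be given when *header* is `None`)::
    from astropy.wcs import Wcsprm
    wcsprm = Wcsprm(naxis=2)   # default-initialized two-axis linear WCS
    wcsprm.set()               # finalize the parameters before use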
"""
Wtbarr = """
Classes to construct coordinate lookup tables from a binary table
extension (BINTABLE).
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.wtb`.
"""
zsource = """
``double`` The redshift, ``ZSOURCEa``, of the source.
An undefined value is represented by NaN.
"""
WcsError = """
Base class of all invalid WCS errors.
"""
SingularMatrix = """
SingularMatrixError()
The linear transformation matrix is singular.
"""
InconsistentAxisTypes = """
InconsistentAxisTypesError()
The WCS header contains inconsistent or unrecognized coordinate axis type(s).
"""
InvalidTransform = """
InvalidTransformError()
The WCS transformation is invalid, or the transformation parameters
are invalid.
"""
InvalidCoordinate = """
InvalidCoordinateError()
One or more of the world coordinates is invalid.
"""
NoSolution = """
NoSolutionError()
No solution can be found in the given interval.
"""
InvalidSubimageSpecification = """
InvalidSubimageSpecificationError()
The subimage specification is invalid.
"""
NonseparableSubimageCoordinateSystem = """
NonseparableSubimageCoordinateSystemError()
Non-separable subimage coordinate system.
"""
NoWcsKeywordsFound = """
NoWcsKeywordsFoundError()
No WCS keywords were found in the given header.
"""
InvalidTabularParameters = """
InvalidTabularParametersError()
The given tabular parameters are invalid.
"""
mjdbeg = """
``double`` Modified Julian Date corresponding to ``DATE-BEG``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.datebeg
"""
mjdend = """
``double`` Modified Julian Date corresponding to ``DATE-END``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.dateend
"""
mjdref = """
``double`` Modified Julian Date corresponding to ``DATE-REF``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.dateref
"""
bepoch = """
``double`` Equivalent to ``DATE-OBS``.
Expressed as a Besselian epoch.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
jepoch = """
``double`` Equivalent to ``DATE-OBS``.
Expressed as a Julian epoch.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
datebeg = """
``string`` Date at the start of the observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.mjdbeg
"""
dateend = """
``string`` Date at the end of the observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.mjdend
"""
dateref = """
``string`` Date of a reference epoch relative to which
other time measurements refer.
See also
--------
astropy.wcs.Wcsprm.mjdref
"""
timesys = """
``string`` Time scale (UTC, TAI, etc.) in which all other time-related
auxiliary header values are recorded. Also defines the time scale for
an image axis with CTYPEia set to 'TIME'.
See also
--------
astropy.wcs.Wcsprm.timeunit
"""
trefpos = """
``string`` Location in space where the recorded time is valid.
See also
--------
astropy.wcs.Wcsprm.trefdir
"""
trefdir = """
``string`` Reference direction used in calculating a pathlength delay.
See also
--------
astropy.wcs.Wcsprm.trefpos
"""
timeunit = """
``string`` Time units in which the following header values are expressed:
``TSTART``, ``TSTOP``, ``TIMEOFFS``, ``TIMSYER``, ``TIMRDER``, ``TIMEDEL``.
It also provides the default value for ``CUNITia`` for time axes.
See also
--------
astropy.wcs.Wcsprm.timesys
"""
plephem = """
``string`` The Solar System ephemeris used for calculating a pathlength delay.
See also
--------
astropy.wcs.Wcsprm.plephem
"""
tstart = """
``double`` Equivalent to ``DATE-BEG``, expressed as a time in units of ``TIMEUNIT`` relative to ``DATEREF`` + ``TIMEOFFS``.
See also
--------
astropy.wcs.Wcsprm.tstop
"""
tstop = """
``double`` Equivalent to ``DATE-END``, expressed as a time in units of ``TIMEUNIT`` relative to ``DATEREF`` + ``TIMEOFFS``.
See also
--------
astropy.wcs.Wcsprm.tstart
"""
telapse = """
``double`` Equivalent to the elapsed time between ``DATE-BEG`` and ``DATE-END``, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.tstart
"""
timeoffs = """
``double`` Time offset, which may be used, for example, to provide a uniform clock correction
for times referenced to DATEREF.
See also
--------
astropy.wcs.Wcsprm.dateref
"""
timsyer = """
``double`` The absolute error of the time values, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.timrder
"""
timrder = """
``double`` The accuracy of time stamps relative to each other, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.timsyer
"""
timedel = """
``double`` The resolution of the time stamps.
See also
--------
astropy.wcs.Wcsprm.timepixr
"""
timepixr = """
``double`` Relative position of the time stamps in binned time intervals, a value between 0.0 and 1.0.
See also
--------
astropy.wcs.Wcsprm.timedel
"""
obsorbit = """
``string`` URI, URL, or name of an orbit ephemeris file giving spacecraft coordinates relating to ``TREFPOS``.
See also
--------
astropy.wcs.Wcsprm.trefpos
"""
xposure = """
``double`` Effective exposure time, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.timeunit
"""
czphs = """
``double array[naxis]`` The time at the zero point of a phase axis, ``CZPHSia``.
An undefined value is represented by NaN.
"""
cperi = """
``double array[naxis]`` The period of a phase axis, ``CPERIia``.
An undefined value is represented by NaN.
"""
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import operator
from datetime import datetime, date, timedelta
from time import strftime, strptime
import numpy as np
from astropy import units as u, constants as const
from astropy import _erfa as erfa
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.compat.misc import override__dir__
from astropy.utils.data_info import MixinInfo, data_info_factory
from .utils import day_frac
from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS,
TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime)
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # pylint: disable=W0611
from astropy.extern import _strptime
__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES',
'ScaleValueError', 'OperandTypeError', 'TimeInfo']
STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
LOCAL_SCALES = ('local',)
TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales)
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'),
('tai', 'tcg'): ('tt',),
('tai', 'ut1'): ('utc',),
('tai', 'tdb'): ('tt',),
('tcb', 'tcg'): ('tdb', 'tt'),
('tcb', 'tt'): ('tdb',),
('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'),
('tcb', 'utc'): ('tdb', 'tt', 'tai'),
('tcg', 'tdb'): ('tt',),
('tcg', 'ut1'): ('tt', 'tai', 'utc'),
('tcg', 'utc'): ('tt', 'tai'),
('tdb', 'ut1'): ('tt', 'tai', 'utc'),
('tdb', 'utc'): ('tt', 'tai'),
('tt', 'ut1'): ('tai', 'utc'),
('tt', 'utc'): ('tai',),
}
GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg')
BARYCENTRIC_SCALES = ('tcb', 'tdb')
ROTATIONAL_SCALES = ('ut1',)
TIME_DELTA_TYPES = dict((scale, scales)
for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES,
ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales)
TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {('tt', 'tai'): None,
('tai', 'tt'): None,
('tcg', 'tt'): -erfa.ELG,
('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcg', 'tai'): -erfa.ELG,
('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcb', 'tdb'): -erfa.ELB,
('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)}
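# As a worked example of the relation above, converting TT to TCG:
#   tcg = tt + tt * SCALE_OFFSETS[('tt', 'tcg')]
#       = tt * (1 + ELG / (1 - ELG)) = tt / (1 - ELG),
# which matches d(TCG)/d(TT) = 1 / (1 - L_G) derived above.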
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
'mean': {
'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')},
'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')},
'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}},
'apparent': {
'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')},
'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')},
'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)},
'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}}
class TimeInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # unit is read-only and None
attr_names = MixinInfo.attr_names | {'serialize_method'}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = ('format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location',
'_delta_ut1_utc', '_delta_tdb_tt')
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = 'value'
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == 'formatted_value':
out = ('value',)
elif method == 'jd1_jd2':
out = ('jd1', 'jd2')
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {'fits': 'jd1_jd2',
'ecsv': 'formatted_value',
'hdf5': 'jd1_jd2',
'yaml': 'jd1_jd2',
None: 'jd1_jd2'}
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats]))
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats])
def _construct_from_dict_base(self, map):
if 'jd1' in map and 'jd2' in map:
format = map.pop('format')
map['format'] = 'jd'
map['val'] = map.pop('jd1')
map['val2'] = map.pop('jd2')
else:
format = map['format']
map['val'] = map.pop('value')
out = self._parent_cls(**map)
out.format = format
return out
def _construct_from_dict(self, map):
delta_ut1_utc = map.pop('_delta_ut1_utc', None)
delta_tdb_tt = map.pop('_delta_tdb_tt', None)
out = self._construct_from_dict_base(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError('input columns have inconsistent locations')
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA
jd1 = np.full(shape, jd2000, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
tm_attrs = {attr: getattr(col0, attr)
for attr in ('scale', 'location',
'precision', 'in_subfmt', 'out_subfmt')}
out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeDeltaInfo(TimeInfo):
_represent_as_dict_extra_attrs = ('format', 'scale')
def _construct_from_dict(self, map):
return self._construct_from_dict_base(map)
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
jd1 = np.zeros(shape, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
out = self._parent_cls(jd1, jd2, format='jd', scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class Time(ShapedLikeNDArray):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear',
'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Subformat for inputting string times
out_subfmt : str, optional
Subformat for outputting string times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize an
EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
copy : bool, optional
Make a copy of the input values
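For example, constructing a scalar UTC time from an ISO string::
    >>> t = Time('2010-01-01 00:00:00', scale='utc')
    >>> t.jd
    2455197.5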
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __new__(cls, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if isinstance(val, cls):
self = val.replicate(format=format, copy=copy)
else:
self = super().__new__(cls)
return self
def __getnewargs__(self):
return (self._time,)
def __init__(self, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
self.location = None
if isinstance(val, self.__class__):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(val, val2, format, scale, copy,
precision, in_subfmt, out_subfmt)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (self.location.size > 1 and
self.location.shape != self.shape):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape,
subok=True)
except Exception:
raise ValueError('The location with shape {0} cannot be '
'broadcast against time with shape {1}. '
'Typically, either give a single location or '
'one for each time.'
.format(self.location.shape, self.shape))
def _init_from_vals(self, val, val2, format, scale, copy,
precision=None, in_subfmt=None, out_subfmt=None):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = '*'
if out_subfmt is None:
out_subfmt = '*'
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError('Input val and val2 have inconsistent shape; '
'they cannot be broadcast together.')
if scale is not None:
if not (isinstance(scale, str) and
scale.lower() in self.SCALES):
raise ScaleValueError("Scale {0!r} is not in the allowed scales "
"{1}".format(scale,
sorted(self.SCALES)))
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(val, val2, format, scale,
precision, in_subfmt, out_subfmt)
self._format = self._time.name
# If any inputs were masked then mask jd2 accordingly.  From the above
# routine, ``mask`` must be either Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale,
precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and val.dtype.kind in ('S', 'U', 'O', 'M'):
formats = [(name, cls) for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)]
err_msg = ('any of the formats where the format keyword is '
'optional {0}'.format([name for name, cls in formats]))
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(('astropy_time', TimeAstropyTime))
elif not (isinstance(format, str) and
format.lower() in self.FORMATS):
if format is None:
raise ValueError("No time format was given, and the input is "
"not unique")
else:
raise ValueError("Format {0!r} is not one of the allowed "
"formats {1}".format(format,
sorted(self.FORMATS)))
else:
formats = [(format, self.FORMATS[format])]
err_msg = 'the format class {0}'.format(format)
for format, FormatClass in formats:
try:
return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError):
pass
else:
raise ValueError('Input values did not match {0}'.format(err_msg))
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format='datetime', scale='utc')
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : string, sequence, ndarray
Objects containing time data of type string
format_string : string
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ('U', 'S'):
err = "Expected type is string, a bytes-like object or a sequence"\
" of these. Got dtype '{}'".format(time_array.dtype.kind)
raise TypeError(err)
to_string = (str if time_array.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([time_array, None],
op_dtypes=[time_array.dtype, 'U30'])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}'\
.format(*time_tuple)
format = kwargs.pop('format', None)
out = cls(*iterator.operands[1:], format='isot', **kwargs)
if format is not None:
out.format = format
return out
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear',
'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(self.FORMATS)))
format_cls = self.FORMATS[format]
# If current output subformat is not in the new format then replace
# with default '*'
if hasattr(format_cls, 'subfmts'):
subfmt_names = [subfmt[0] for subfmt in format_cls.subfmts]
if self.out_subfmt not in subfmt_names:
self.out_subfmt = '*'
self._time = format_cls(self._time.jd1, self._time.jd2,
self._time._scale, self.precision,
in_subfmt=self.in_subfmt,
out_subfmt=self.out_subfmt,
from_jd=True)
self._format = format
def __repr__(self):
return ("<{0} object: scale='{1}' format='{2}' value={3}>"
.format(self.__class__.__name__, self.scale, self.format,
getattr(self, self.format)))
def __str__(self):
return str(getattr(self, self.format))
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : string
Format definition of return string.
Returns
-------
formatted : string, numpy.array
String or numpy.array of strings formatted according to the given
format string.
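For example (a sketch; month-name output depends on the locale)::
    Time('2018-01-01 10:00:00').strftime('%d %b %Y, %H:%M')
    # -> '01 Jan 2018, 10:00'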
"""
formatted_strings = []
for sk in self.replicate('iso')._time.str_kwargs():
date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple()
datetime_tuple = (sk['year'], sk['mon'], sk['day'],
sk['hour'], sk['min'], sk['sec'],
date_tuple[6], date_tuple[7], -1)
fmtd_str = format_spec
if '%f' in fmtd_str:
fmtd_str = fmtd_str.replace('%f', '{frac:0{precision}}'.format(frac=sk['fracsec'], precision=self.precision))
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
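# As a worked example, converting from 'utc' to 'tdb': xform_sort is
# ('tdb', 'utc'), MULTI_HOPS gives ('tt', 'tai'), so xforms becomes
# ('tdb', 'tt', 'tai', 'utc') and is reversed to ('utc', 'tai', 'tt', 'tdb').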
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = '_get_delta_{0}_{1}'.format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision,
self.in_subfmt, self.out_subfmt,
from_jd=True)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError('precision attribute must be an int between '
'0 and 9')
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('in_subfmt attribute must be a string')
self._time.in_subfmt = val
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('out_subfmt attribute must be a string')
self._time.out_subfmt = val
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in ((self._time, 'jd1'),
(self._time, 'jd2'),
(self, '_delta_ut1_utc'),
(self, '_delta_tdb_tt'),
(self, 'location')):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
out = value
if value.dtype.kind == 'M':
return value[()]
if not self._time.jd1.shape and not np.ma.is_masked(value):
out = value.item()
return out
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
@property
def value(self):
"""Time value(s) in current format"""
# The underlying way to get the time values for the current format is:
# self._shaped_like_input(self._time.to_value(parent=self))
# This is done in __getattr__. By calling getattr(self, self.format)
# the ``value`` attribute is cached.
return getattr(self, self.format)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
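For example (a sketch)::
    >>> t = Time(['2001-01-02', '2001-01-04'], scale='utc')
    >>> len(t.insert(1, Time('2001-01-03')))
    3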
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError('obj arg must be an integer')
if axis != 0:
raise ValueError('axis must be 0')
if not self.shape:
raise TypeError('cannot insert into scalar {} object'
.format(self.__class__.__name__))
if abs(idx0) > len(self):
raise IndexError('index {} is out of bounds for axis 0 with size {}'
.format(idx0, len(self)))
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, Time):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0:idx0 + n_values] = values
out._time.jd1[idx0 + n_values:] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values:] = self._time.jd2[idx0:]
return out
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif ((self_location is None and value.location is not None) or
(self_location is not None and value.location is None)):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError('cannot set to Time with different location: '
'expected location={} and '
'got location={}'
.format(self_location, value.location))
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format,
location=self_location)
except Exception as err:
raise ValueError('cannot convert value to a compatible Time object: {}'
.format(err))
return value
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError('{} object is read-only. Make a '
'copy() or set "writeable" attribute to True.'
.format(self.__class__.__name__))
else:
raise ValueError('scalar {} object is read-only.'
.format(self.__class__.__name__))
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the Solar system barycentre or the heliocentre. The time
conversion to BJD will then also include the relativistic correction.
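A usage sketch (assuming ``t`` is such a `Time` with its ``location``
set and ``coo`` is a `~astropy.coordinates.SkyCoord`)::
    ltt_bary = t.light_travel_time(coo)   # barycentric by default
    t_bary = t.tdb + ltt_bary             # arrival time at the barycentre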
"""
if kind.lower() not in ('barycentric', 'heliocentric'):
raise ValueError("'kind' parameter must be one of 'heliocentric' "
"or 'barycentric'")
if location is None:
if self.location is None:
raise ValueError('An EarthLocation needs to be set or passed '
'in to calculate bary- or heliocentric '
'corrections')
location = self.location
from astropy.coordinates import (UnitSphericalRepresentation, CartesianRepresentation,
HCRS, ICRS, GCRS, solar_system_ephemeris)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError("Supplied location does not have a valid `get_itrs` method")
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == 'heliocentric':
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation).
represent_as(CartesianRepresentation).xyz)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale='tdb')
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `str`, or `None`; optional
The longitude on the Earth at which to compute the sidereal time.
Can be given as a `~astropy.units.Quantity` with angular units
(or an `~astropy.coordinates.Angle` or
`~astropy.coordinates.Longitude`), or as a name of an
observatory (currently, only ``'greenwich'`` is supported,
equivalent to 0 deg). If `None` (default), the ``lon`` attribute of
the Time object is used.
model : str or `None`; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
sidereal time : `~astropy.coordinates.Longitude`
Sidereal time as a quantity with units of hourangle
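A usage sketch (assuming ``t`` is a `Time` instance with a location)::
    lst = t.sidereal_time('apparent')            # local apparent sidereal time
    gmst = t.sidereal_time('mean', 'greenwich')  # Greenwich mean sidereal time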
""" # docstring is formatted below
from astropy.coordinates import Longitude
if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
raise ValueError('The kind of sidereal time has to be {0}'.format(
' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models.keys())[-1]
else:
if model.upper() not in available_models:
raise ValueError(
'Model {0} not implemented for {1} sidereal time; '
'available models are {2}'
.format(model, kind, sorted(available_models.keys())))
if longitude is None:
if self.location is None:
raise ValueError('No longitude is given but the location for '
'the Time object is not set.')
longitude = self.location.lon
elif longitude == 'greenwich':
longitude = Longitude(0., u.degree,
wrap_angle=180.*u.degree)
else:
# sanity check on input
longitude = Longitude(longitude, u.degree,
wrap_angle=180.*u.degree)
gst = self._erfa_sidereal_time(available_models[model.upper()])
return Longitude(gst + longitude, u.hourangle)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
def _erfa_sidereal_time(self, model):
"""Calculate a sidereal time using a IAU precession/nutation model."""
from astropy.coordinates import Longitude
erfa_function = model['function']
erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
for scale in model['scales']
for jd_part in ('jd1', 'jd2_filled')]
sidereal_time = erfa_function(*erfa_parameters)
if self.masked:
sidereal_time[self.mask] = np.nan
return Longitude(sidereal_time, u.radian).to(u.hourangle)
def copy(self, format=None):
"""
Return a fully independent copy the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply('copy', format=format)
def replicate(self, format=None, copy=False):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply('copy' if copy else 'replicate', format=format)
def _apply(self, method, *args, format=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast_to, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == 'replicate':
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(self.__class__)
tm._time = TimeJD(jd1, jd2, self.scale, self.precision,
self.in_subfmt, self.out_subfmt, from_jd=True)
# Optional ndarray attributes.
for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location',
'precision', 'in_subfmt', 'out_subfmt'):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only a single element and the method would return a view,
# since in that case nothing would change).
if getattr(val, 'size', 1) > 1:
val = apply_method(val)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(tm.FORMATS)))
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(tm._time.jd1, tm._time.jd2,
tm._time._scale, tm.precision,
tm.in_subfmt, tm.out_subfmt,
from_jd=True)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. When ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices``, with its shape expanded
at ``axis``. When ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
return tuple([(indices if i == axis else np.arange(s).reshape(
(1,)*(i if keepdims or i < axis else i-1) + (s,) +
(1,)*(ndim-i-(1 if keepdims or i > axis else 2))))
for i, s in enumerate(self.shape)])
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
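Examples
--------
A minimal illustration (the dates here are arbitrary)::
>>> t = Time(['2001-01-02', '2001-01-01'], scale='utc')
>>> int(t.argmin())
1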
"""
# first get the minimum at normal precision.
jd = self.jd1 + self.jd2
approx = np.min(jd, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (self.jd1 - approx) + self.jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd = self.jd1 + self.jd2
approx = np.max(jd, axis, keepdims=True)
dt = (self.jd1 - approx) + self.jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
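Examples
--------
A minimal illustration (the dates here are arbitrary)::
>>> t = Time(['2016-09-05', '2016-01-02', '2016-07-04'], scale='utc')
>>> t.argsort()
array([1, 2, 0])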
"""
jd_approx = self.jd
jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd
if axis is None:
return np.lexsort((jd_remainder.ravel(), jd_approx.ravel()))
else:
return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
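Examples
--------
A minimal illustration (the dates are arbitrary; the result is a
`TimeDelta`)::
>>> t = Time(['2001-01-01', '2001-01-03'], scale='utc')
>>> t.ptp().jd
2.0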
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return (self.max(axis, keepdims=keepdims) -
self.min(axis, keepdims=keepdims))
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis,
keepdims=True)]
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache['scale']
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
cache = self.cache['format']
if attr not in cache:
if attr == self.format:
tm = self
else:
tm = self.replicate(format=attr)
value = tm._shaped_like_input(tm._time.to_value(parent=tm))
cache[attr] = value
return cache[attr]
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError("Cannot convert TimeDelta with "
"undefined scale to any defined scale.")
else:
raise ScaleValueError("Cannot convert {0} with scale "
"'{1}' to scale '{2}'"
.format(self.__class__.__name__,
self.scale, attr))
else:
# Should raise AttributeError
return self.__getattribute__(attr)
@override__dir__
def __dir__(self):
result = set(self.SCALES)
result.update(self.FORMATS)
return result
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError('Attribute shape must match or be '
'broadcastable to that of Time object. '
'Typically, give either a single value or '
'one for each time.')
return val
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : ``astropy.utils.iers.IERS`` table, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. If `None`, use default version (see
``astropy.utils.iers``)
return_status : bool
Whether to return status values. If `False` (default), an
`IndexError` is raised if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
the first time ``ut1`` is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True)
>>> status == TIME_BEFORE_IERS_RANGE
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import IERS
iers_table = IERS.open()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate it from
the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, '_delta_ut1_utc'):
from astropy.utils.iers import IERS_Auto
iers_table = IERS_Auto.open()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = 'utc'
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == 'ut1':
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, '_delta_tdb_tt'):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ('tt', 'tdb'):
raise ValueError('Accessing the delta_tdb_tt attribute '
'is only possible for TT or TDB time '
'scales')
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
from astropy.coordinates import EarthLocation
location = EarthLocation.from_geodetic(0., 0., 0.)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1, jd2, ut, lon.to_value(u.radian),
rxy.to_value(u.km), z.to_value(u.km))
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta - something is dealt with in TimeDelta, so we have
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = isinstance(other, TimeDelta)
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
if other_is_delta: # T - Tdelta
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot subtract Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError("Cannot subtract Time instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
self_time = (self._time if self.scale in TIME_DELTA_SCALES
else self.tai._time)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
scale=self_time.scale)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta + something is dealt with in TimeDelta, so we have
# T + Tdelta = T
# T + T = error
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '+')
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot add Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if(self.scale is not None and self.scale not in other.SCALES or
other.scale is not None and other.scale not in self.SCALES):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError("Cannot compare {0} instances with scales "
"'{1}' and '{2}'".format(self.__class__.__name__,
self.scale, other.scale))
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
def to_datetime(self, timezone=None):
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDelta(Time):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
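For example, a minimal sketch of typical use (here ``u`` is
``astropy.units``)::
>>> import astropy.units as u
>>> dt = TimeDelta(1. * u.day)
>>> dt.jd
1.0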
See also:
- http://docs.astropy.org/en/stable/time/
- http://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
if format is None:
format = 'datetime' if isinstance(val, timedelta) else 'jd'
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1, jd2 + offset2, scale,
self.precision, self.in_subfmt,
self.out_subfmt, from_jd=True)
def __add__(self, other):
# only deal with TimeDelta + TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
return other.__add__(self)
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if(self.scale is not None and self.scale not in other.SCALES or
other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot add TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 + other._time.jd1
jd2 = self._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __sub__(self, other):
# only deal with TimeDelta - TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '-')
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if(self.scale is not None and self.scale not in other.SCALES or
other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot subtract TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 - other._time.jd1
jd2 = self._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time) and not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '*')
elif ((isinstance(other, u.UnitBase) and
other == u.dimensionless_unscaled) or
(isinstance(other, str) and other == '')):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply it in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if ((isinstance(other, u.UnitBase) and
other == u.dimensionless_unscaled) or
(isinstance(other, str) and other == '')):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, *args, **kwargs):
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to(*args, **kwargs)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError('cannot convert value to a compatible TimeDelta '
'object: {}'.format(err))
return value
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
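Examples
--------
A small illustration (the input values are arbitrary)::
>>> _make_array([1, 2.5]).dtype
dtype('float64')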
"""
val = np.array(val, copy=copy, subok=True)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if not (val.dtype == np.float64 or val.dtype.kind in 'OSUMa'):
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the returned ``mask``
is ``False``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask : bool or bool ndarray
``False`` if neither input is masked, else logical-or of input masks.
val, val2 : ndarray
Filled (plain ndarray) versions of the inputs.
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = '' if op is None else ' for {0}'.format(op)
super().__init__(
"Unsupported operand type(s){0}: "
"'{1}' and '{2}'".format(op_string,
left.__class__.__name__,
right.__class__.__name__))
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
import numpy as np
from astropy import units as u
def day_frac(val1, val2, factor=None, divisor=None):
"""
Return the sum of ``val1`` and ``val2`` as two float64s, an integer part
and the fractional remainder. If ``factor`` is given, then multiply the
sum by it. If ``divisor`` is given, then divide the sum by it.
The arithmetic is all done with exact floating point operations so no
precision is lost to rounding error. This routine assumes the sum is less
than about 1e16, otherwise the ``frac`` part will be greater than 1.0.
Returns
-------
day, frac : float64
Integer and fractional part of val1 + val2.
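Examples
--------
A small illustration (the input values are arbitrary)::
>>> day_frac(2450000.0, 0.75)
(2450001.0, -0.25)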
"""
# Add val1 and val2 exactly, returning the result as two float64s.
# The first is the approximate sum (with some floating point error)
# and the second is the error of the float64 sum.
sum12, err12 = two_sum(val1, val2)
if factor is not None:
sum12, carry = two_product(sum12, factor)
carry += err12 * factor
sum12, err12 = two_sum(sum12, carry)
if divisor is not None:
q1 = sum12 / divisor
p1, p2 = two_product(q1, divisor)
d1, d2 = two_sum(sum12, -p1)
d2 += err12
d2 -= p2
q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost
sum12, err12 = two_sum(q1, q2)
# get integer fraction
day = np.round(sum12)
extra, frac = two_sum(sum12, -day)
frac += extra + err12
return day, frac
def quantity_day_frac(val1, val2=None):
"""Like ``day_frac``, but for quantities with units of time.
The quantities are separately converted to days. Here, we need to take
care with the conversion since while the routines here can do accurate
multiplication, the conversion factor itself may not be accurate. For
instance, if the quantity is in seconds, the conversion factor is
1./86400., which is not exactly representable as a float.
To work around this, for conversion factors less than unity, rather than
multiply by that possibly inaccurate factor, the value is divided by the
conversion factor of a day to that unit (i.e., by 86400. for seconds). For
conversion factors larger than 1, such as 365.25 for years, we do just
multiply. With this scheme, one has precise conversion factors for all
regular time units that astropy defines. Note, however, that it does not
necessarily work for all custom time units, and cannot work when conversion
to time is via an equivalency. For those cases, one remains limited by the
fact that Quantity calculations are done in double precision, not in
quadruple precision as for time.
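Examples
--------
A small illustration (assuming ``u`` is ``astropy.units``)::
>>> import astropy.units as u
>>> quantity_day_frac(86400. * u.s)
(1.0, 0.0)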
"""
if val2 is not None:
res11, res12 = quantity_day_frac(val1)
res21, res22 = quantity_day_frac(val2)
# This summation can at most lose 1 ULP in the second number.
return res11 + res21, res12 + res22
try:
factor = val1.unit.to(u.day)
except Exception:
# Not a simple scaling, so cannot do the full-precision one.
# But at least try normal conversion, since equivalencies may be set.
return val1.to_value(u.day), 0.
if factor >= 1.:
return day_frac(val1.value, 0., factor=factor)
else:
divisor = u.day.to(val1.unit)
return day_frac(val1.value, 0., divisor=divisor)
def two_sum(a, b):
"""
Add ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate sum (with some floating point error)
and the second is the error of the float64 sum.
Using the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
sum, err : float64
Approximate sum of a + b and the exact floating point error
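Examples
--------
A small illustration of the recovered error term (values arbitrary;
1e16 + 1 rounds to 1e16 in float64, and the lost 1.0 is returned as err)::
>>> two_sum(1e16, 1.0)
(1e+16, 1.0)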
"""
x = a + b
eb = x - a
eb = b - eb
ea = x - b
ea = a - ea
return x, ea + eb
def two_product(a, b):
"""
Multiply ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate product (with some floating point error)
and the second is the error of the float64 product.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
prod, err : float64
Approximate product a * b and the exact floating point error
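Examples
--------
A small illustration of the recovered error term (values arbitrary;
the exact product 1e16 - 1 rounds to 1e16 in float64)::
>>> two_product(100000001., 99999999.)
(1e+16, -1.0)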
"""
x = a * b
ah, al = split(a)
bh, bl = split(b)
y1 = ah * bh
y = x - y1
y2 = al * bh
y -= y2
y3 = ah * bl
y -= y3
y4 = al * bl
y = y4 - y
return x, y
def split(a):
"""
Split float64 in two aligned parts.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
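Examples
--------
A trivial illustration (the value is arbitrary; small integers split
with a zero low part)::
>>> split(3.0)
(3.0, 0.0)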
"""
c = 134217729. * a # 2**27+1.
abig = c - a
ah = c - abig
al = a - ah
return ah, al
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import time
import re
import datetime
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy import _erfa as erfa
from .utils import day_frac, quantity_day_frac, two_sum, two_product
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
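For example, the strptime string ``'%Y-%m-%d'`` becomes the compiled
regular expression ``(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$``,
while any sub-format still containing a ``%`` is left for strptime.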
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
class TimeFormat(metaclass=TimeFormatMeta):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = 'utc' # As of astropy 0.4
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
ok1 = val1.dtype == np.double and np.all(np.isfinite(val1))
ok2 = val2 is None or (val2.dtype == np.double and not np.any(np.isinf(val2)))
if not (ok1 and ok2):
raise TypeError('Input values for {0} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case, if a scale value
is provided it must match the class default; otherwise the class
default is returned.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{0}' not in "
"allowed values {1}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.mask_if_needed(self.value)
@property
def value(self):
raise NotImplementedError
class TimeJD(TimeFormat):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
@property
def value(self):
return self.jd1 + self.jd2
class TimeMJD(TimeFormat):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
# TODO - this routine and vals should be Cythonized to follow the ERFA
# convention of preserving precision by adding to the larger of the two
# values in a vectorized operation. But in most practical cases the
# first one is probably biggest.
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
return (self.jd1 - erfa.DJM0) + self.jd2
class TimeDecimalYear(TimeFormat):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
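A short doctest illustration (``FLOAT_CMP`` assumes the
pytest-doctestplus plugin used for astropy doctests)::
>>> t = Time('2000-07-02 00:00:00', scale='utc')
>>> t.decimalyear  # doctest: +FLOAT_CMP
2000.5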
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return decimalyear
class TimeFromEpoch(TimeFormat):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,
from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}' "
"to specified scale '{2}', got error:\n{3}"
.format(self.name, self.epoch_scale,
self.scale, err))
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}' "
"to specified scale '{2}', got error:\n{3}"
.format(self.name, self.epoch_scale,
self.scale, err))
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
time_from_epoch = ((jd1 - self.epoch.jd1) +
(jd2 - self.epoch.jd2)) / self.unit
return self.mask_if_needed(time_from_epoch)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
-----
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see http://tycho.usno.navy.mil/gpstt.html
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {0} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {0} class must be '
'datetime objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok'],
op_dtypes=[object] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None} (optional)
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok'],
op_dtypes=7*[iys.dtype] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity` (optional)
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity` (optional)
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : string, `None` (optional)
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
This is a reference implementation; it can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for these classes
if val1.dtype.kind not in ('S', 'U'):
raise TypeError('Input values for {0} class must be strings'
.format(self.name))
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name))
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
op_dtypes=[val1.dtype] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
        scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
        has_yday = '{yday:' in str_fmt
        yday = None
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs]):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError('No subformats match {0}'.format(pattern))
return subfmts
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
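    A minimal usage sketch (assumes `astropy.time.Time`):
        >>> from astropy.time import Time  # doctest: +SKIP
        >>> Time('2000-01-01 12:00:00', out_subfmt='date_hm').iso  # doctest: +SKIP
        '2000-01-01 12:00'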
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
                raise ValueError("Time input terminating in 'Z' must have "
                                 "scale='utc'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
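    A minimal usage sketch (assumes `astropy.time.Time`):
        >>> from astropy.time import Time  # doctest: +SKIP
        >>> Time('2000-01-01 12:00:00').yday  # doctest: +SKIP
        '2000:001:12:00:00.000'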
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
class TimeDatetime64(TimeISOT):
name = 'datetime64'
def _check_val_type(self, val1, val2):
        # Note: don't care about val2 for this class
if not val1.dtype.kind == 'M':
raise TypeError('Input values for {0} class must be '
'datetime64 objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = '2000'
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:
val1 = val1.astype('datetime64[D]')
val1 = val1.astype('S')
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype('datetime64')
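    # Round-trip sketch (assumes numpy as np and astropy.time.Time):
    # Time(np.datetime64('2000-01-01T01:02:03'), format='datetime64').value
    # returns numpy.datetime64('2000-01-01T01:02:03.000000000'), because the
    # value property above temporarily forces nanosecond (precision=9) output.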
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
    Like ISOT, but can also give a signed five-digit year (mostly for negative years).
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
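    A minimal usage sketch (assumes `astropy.time.Time`):
        >>> from astropy.time import Time  # doctest: +SKIP
        >>> Time('+02000-01-01T00:00:00', format='fits', out_subfmt='longdate').value  # doctest: +SKIP
        '+02000-01-01'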
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
    # Add the regex that parses the scale and possible realization.
    # Support for this is deprecated: old-style strings are still read,
    # but no longer written.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present"""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name))
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm['scale'] is not None:
warnings.warn("FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError("Input strings for {0} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
@property
    def value(self):
        """Convert times to strings, using signed 5-digit years if necessary."""
if 'long' not in self.out_subfmt:
            # If we have times before year 1 or after year 9999, we can
            # output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.min() < 1721425.5 or jd.max() >= 5373484.5:
self.out_subfmt = 'long' + self.out_subfmt
return super().value
class TimeEpochDate(TimeFormat):
"""
    Base class to support floating-point Besselian and Julian epoch dates
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
return jd_to_epoch(self.jd1, self.jd2)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
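    # Usage sketch (assumes astropy.time.Time): Time(2000.0, format='jyear')
    # corresponds exactly to JD 2451545.0 (TT), while Time(1950.0,
    # format='byear') gives the classical Besselian epoch B1950.0.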
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError('Time {0} does not match {1} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
class TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{0}' not in "
"allowed values {1}"
.format(scale, TIME_DELTA_SCALES))
return scale
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
@property
def value(self):
return (self.jd1 + self.jd2) / self.unit
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {0} class must be '
'datetime.timedelta objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None],
flags=['refs_ok'],
op_dtypes=[object] + [np.double])
for val, sec in iterator:
sec[...] = val.item().total_seconds()
self.jd1, self.jd2 = day_frac(iterator.operands[-1], 0.0,
divisor=erfa.DAYSEC)
@property
def value(self):
iterator = np.nditer([self.jd1 + self.jd2, None],
flags=['refs_ok'],
op_dtypes=[self.jd1.dtype] + [object])
for jd, out in iterator:
out[...] = datetime.timedelta(days=jd.item())
return self.mask_if_needed(iterator.operands[-1])
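# A minimal sketch of the delta formats above (assumes
# astropy.time.TimeDelta): TimeDelta(1.5, format='jd').sec is 129600.0,
# and TimeDelta(30.0, format='sec').jd is 30 / 86400 days.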
# Imported at the end of the module to avoid a circular dependency with
# .core, which itself imports the format classes defined above.
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
|
46a663cdafb6a061a2965f5a6163d871bed3be6e4e46c909ccc61ee3325a9b0f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from astropy.utils.decorators import wraps
from astropy.utils.misc import isiterable
from .core import Unit, UnitsError, add_enabled_equivalencies
from .physical import _unit_physical_mapping
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try: # unit passed in as a string
target_unit = Unit(target)
except ValueError:
try: # See if the function writer specified a physical type
physical_type_id = _unit_physical_mapping[target]
except KeyError: # Function argument target is invalid
raise ValueError("Invalid unit or physical type '{0}'."
.format(target))
# get unit directly from physical type id
target_unit = Unit._from_physical_type_id(physical_type_id)
allowed_units.append(target_unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies):
"""
    Validates the object passed in to the wrapped function, ``arg``, against
    the target unit(s) or physical type(s) in ``targets``.
"""
allowed_units = _get_allowed_units(targets)
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError("Argument '{0}' to function '{1}' has {2}. "
"You may want to pass in an astropy Quantity instead."
.format(param_name, func_name, error_msg))
else:
if len(targets) > 1:
raise UnitsError("Argument '{0}' to function '{1}' must be in units"
" convertible to one of: {2}."
.format(param_name, func_name,
[str(targ) for targ in targets]))
else:
raise UnitsError("Argument '{0}' to function '{1}' must be in units"
" convertible to '{2}'."
.format(param_name, func_name,
str(targets[0])))
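# Behavior sketch for the validator above: for a parameter with target u.m,
# passing a bare float raises TypeError (the value has no 'unit' attribute),
# while passing 3 * u.s raises UnitsError (seconds are not convertible to
# meters).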
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the decorator,
or by using function annotation syntax. Arguments to the decorator
take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator
or in the annotation.
        If the argument has no unit attribute, i.e. it is not a Quantity object, a
        `TypeError` will be raised.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
        supported (i.e. \*args or \**kwargs).
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
        You can also specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if param.name not in bound_args.arguments and param.default is not param.empty:
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
else:
targets = param.annotation
                # If targets is empty, then no target units or physical
                # types were specified, so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
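                # E.g. (sketch): with @u.quantity_input(x=u.m) and
                # ``def f(x=None)``, both f() and f(None) skip validation
                # here instead of failing on the missing unit attribute.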
                # Here, we check whether multiple target units/physical
                # types were specified in the decorator/annotation, or
                # whether a single string (unit or physical type) or a
                # Unit object was specified
if isinstance(targets, str) or not isiterable(targets):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
if wrapped_signature.return_annotation not in (inspect.Signature.empty, None):
return return_.to(wrapped_signature.return_annotation)
else:
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
|
d72589f2ce406b08ea9fc34ad6bb678ff8c4beffd999e50e8a0a1b845bb2a310 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units used in the CDS format, both the units
defined in `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0
<http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_ format and the `complete
set of supported units <http://vizier.u-strasbg.fr/cgi-bin/Unit>`_.
This format is used by VOTable up to version 1.2.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.cds`
module::
>>> from astropy.units import cds
>>> q = 10. * cds.lyr # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import cds
>>> cds.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
import numpy as np
from . import core
from astropy import units as u
from astropy.constants import si as _si
# The CDS format also supports power-of-2 prefixes as defined here:
# http://physics.nist.gov/cuu/Units/binary.html
prefixes = core.si_prefixes + core.binary_prefixes
# CDS only uses the short prefixes
prefixes = [(short, short, factor) for (short, long, factor) in prefixes]
# The following units are defined in alphabetical order, directly from
# here: http://vizier.u-strasbg.fr/cgi-bin/Unit
mapping = [
(['A'], u.A, "Ampere"),
(['a'], u.a, "year", ['P']),
(['a0'], _si.a0, "Bohr radius"),
(['al'], u.lyr, "Light year", ['c', 'd']),
(['lyr'], u.lyr, "Light year"),
(['alpha'], _si.alpha, "Fine structure constant"),
((['AA', 'Å'], ['Angstrom', 'Angstroem']), u.AA, "Angstrom"),
(['arcmin', 'arcm'], u.arcminute, "minute of arc"),
(['arcsec', 'arcs'], u.arcsecond, "second of arc"),
(['atm'], _si.atm, "atmosphere"),
(['AU', 'au'], u.au, "astronomical unit"),
(['bar'], u.bar, "bar"),
(['barn'], u.barn, "barn"),
(['bit'], u.bit, "bit"),
(['byte'], u.byte, "byte"),
(['C'], u.C, "Coulomb"),
(['c'], _si.c, "speed of light", ['p']),
(['cal'], 4.1854 * u.J, "calorie"),
(['cd'], u.cd, "candela"),
(['ct'], u.ct, "count"),
(['D'], u.D, "Debye (dipole)"),
(['d'], u.d, "Julian day", ['c']),
((['deg', '°'], ['degree']), u.degree, "degree"),
(['dyn'], u.dyn, "dyne"),
(['e'], _si.e, "electron charge", ['m']),
(['eps0'], _si.eps0, "electric constant"),
(['erg'], u.erg, "erg"),
(['eV'], u.eV, "electron volt"),
(['F'], u.F, "Farad"),
(['G'], _si.G, "Gravitation constant"),
(['g'], u.g, "gram"),
(['gauss'], u.G, "Gauss"),
(['geoMass', 'Mgeo'], u.M_earth, "Earth mass"),
(['H'], u.H, "Henry"),
(['h'], u.h, "hour", ['p']),
(['hr'], u.h, "hour"),
(['\\h'], _si.h, "Planck constant"),
(['Hz'], u.Hz, "Hertz"),
(['inch'], 0.0254 * u.m, "inch"),
(['J'], u.J, "Joule"),
(['JD'], u.d, "Julian day", ['M']),
(['jovMass', 'Mjup'], u.M_jup, "Jupiter mass"),
(['Jy'], u.Jy, "Jansky"),
(['K'], u.K, "Kelvin"),
(['k'], _si.k_B, "Boltzmann"),
(['l'], u.l, "litre", ['a']),
(['lm'], u.lm, "lumen"),
(['Lsun', 'solLum'], u.solLum, "solar luminosity"),
(['lx'], u.lx, "lux"),
(['m'], u.m, "meter"),
(['mag'], u.mag, "magnitude"),
(['me'], _si.m_e, "electron mass"),
(['min'], u.minute, "minute"),
(['MJD'], u.d, "Julian day"),
(['mmHg'], 133.322387415 * u.Pa, "millimeter of mercury"),
(['mol'], u.mol, "mole"),
(['mp'], _si.m_p, "proton mass"),
(['Msun', 'solMass'], u.solMass, "solar mass"),
((['mu0', 'µ0'], []), _si.mu0, "magnetic constant"),
(['muB'], _si.muB, "Bohr magneton"),
(['N'], u.N, "Newton"),
(['Ohm'], u.Ohm, "Ohm"),
(['Pa'], u.Pa, "Pascal"),
(['pc'], u.pc, "parsec"),
(['ph'], u.ph, "photon"),
(['pi'], u.Unit(np.pi), "π"),
(['pix'], u.pix, "pixel"),
(['ppm'], u.Unit(1e-6), "parts per million"),
(['R'], _si.R, "gas constant"),
(['rad'], u.radian, "radian"),
(['Rgeo'], _si.R_earth, "Earth equatorial radius"),
(['Rjup'], _si.R_jup, "Jupiter equatorial radius"),
(['Rsun', 'solRad'], u.solRad, "solar radius"),
(['Ry'], u.Ry, "Rydberg"),
(['S'], u.S, "Siemens"),
(['s', 'sec'], u.s, "second"),
(['sr'], u.sr, "steradian"),
(['Sun'], u.Sun, "solar unit"),
(['T'], u.T, "Tesla"),
(['t'], 1e3 * u.kg, "metric tonne", ['c']),
(['u'], _si.u, "atomic mass", ['da', 'a']),
(['V'], u.V, "Volt"),
(['W'], u.W, "Watt"),
(['Wb'], u.Wb, "Weber"),
(['yr'], u.a, "year"),
]
for entry in mapping:
if len(entry) == 3:
names, unit, doc = entry
excludes = []
else:
names, unit, doc, excludes = entry
core.def_unit(names, unit, prefixes=prefixes, namespace=_ns, doc=doc,
exclude_prefixes=excludes)
core.def_unit(['µas'], u.microarcsecond,
doc="microsecond of arc", namespace=_ns)
core.def_unit(['mas'], u.milliarcsecond,
doc="millisecond of arc", namespace=_ns)
core.def_unit(['---'], u.dimensionless_unscaled,
doc="dimensionless and unscaled", namespace=_ns)
core.def_unit(['%'], u.percent,
doc="percent", namespace=_ns)
# The Vizier "standard" defines this in units of "kg s-3", but
# that may not make a whole lot of sense, so here we just define
# it as its own new disconnected unit.
core.def_unit(['Crab'], prefixes=prefixes, namespace=_ns,
doc="Crab (X-ray) flux")
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable CDS units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`. This will disable
all of the "default" `astropy.units` units, since there
are some namespace clashes between the two.
This may be used with the ``with`` statement to enable CDS
units only temporarily.
"""
# Local import to avoid cyclical import
from .core import set_enabled_units
# Local import to avoid polluting namespace
import inspect
return set_enabled_units(inspect.getmodule(enable))
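# Usage sketch (mirrors the module docstring): after ``from astropy.units
# import cds``, quantities such as ``10 * cds.mas`` can be built directly,
# and ``cds.enable()`` makes these units visible to find_equivalent_units
# and compose.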
|
770c5976a6397ad1581d44732886df784981bfd49f893def2f1f216787e372d8 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines magnitude zero points and related photometric quantities.
The corresponding magnitudes are given in the description of each unit
(the actual definitions are in `~astropy.units.function.logarithmic`).
"""
import numpy as _numpy
from .core import UnitBase, def_unit, Unit
from astropy.constants import si as _si
from . import cgs, si, astrophys
_ns = globals()
def_unit(['Bol', 'L_bol'], _si.L_bol0, namespace=_ns, prefixes=False,
doc="Luminosity corresponding to absolute bolometric magnitude zero "
"(magnitude ``M_bol``).")
def_unit(['bol', 'f_bol'], _si.L_bol0 / (4 * _numpy.pi * (10.*astrophys.pc)**2),
         namespace=_ns, prefixes=False, doc="Irradiance corresponding to "
         "apparent bolometric magnitude zero (magnitude ``m_bol``).")
def_unit(['AB', 'ABflux'], 10.**(48.6/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.Hz,
namespace=_ns, prefixes=False,
doc="AB magnitude zero flux density (magnitude ``ABmag``).")
def_unit(['ST', 'STflux'], 10.**(21.1/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.AA,
namespace=_ns, prefixes=False,
doc="ST magnitude zero flux density (magnitude ``STmag``).")
def_unit(['mgy', 'maggy'],
namespace=_ns, prefixes=[(['n'], ['nano'], 1e-9)],
         doc="Maggies - a linear flux unit that is the flux for a mag=0 object. "
"To tie this onto a specific calibrated unit system, the "
"zero_point_flux equivalency should be used.")
def zero_point_flux(flux0):
"""
    An equivalency for converting linear flux units ("maggies") defined
    relative to a standard source into a standardized system.
Parameters
----------
flux0 : u.Quantity
The flux of a magnitude-0 object in the "maggy" system.
"""
flux_unit0 = Unit(flux0)
return [(maggy, flux_unit0)]
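# Usage sketch, with a hypothetical zero point of 3631 Jy (roughly the AB
# system) and assuming ``import astropy.units as u``:
#     (1 * maggy).to(u.Jy, zero_point_flux(3631 * u.Jy))  # -> 3631 Jy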
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del cgs, si, astrophys
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
452949ebd115c25780746dfcfc3e553f618eb928ede6195492f57a2703f0315c | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable, InheritDocstrings
from .utils import (is_effectively_unity, sanitize_scale, validate_power,
resolve_fractions)
from . import format as unit_format
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'dimensionless_unscaled', 'one']
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(
"Invalid equivalence entry {0}: {1!r}".format(i, equiv))
if not (funit is Unit(funit) and
(tunit is None or tunit is Unit(tunit)) and
callable(a) and
callable(b)):
raise ValueError(
"Invalid equivalence entry {0}: {1!r}".format(i, equiv))
normalized.append((funit, tunit, a, b))
return normalized
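# Shape illustration (a sketch): a bare pair like (u.m, u.km) normalizes to
# (u.m, u.km, <identity>, <identity>), while 4-tuples pass through as-is.
# Note that Unit(x) returns x itself when x is already a unit, so the ``is``
# checks above verify the entries hold genuine unit objects, not strings.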
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[]):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {k: v.copy() for k, v in
init._by_physical_type.items()}
else:
self._reset_units()
self._reset_equivalencies()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if (st in self._registry and unit != self._registry[st]):
raise ValueError(
"Object with name {0!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them.".format(st))
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(
_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
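# Mechanism sketch: constructing a _UnitContext pushes a fresh _UnitRegistry
# onto the stack below, so get_current_unit_registry() inside a ``with``
# block sees the new registry; __exit__ simply pops it off again.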
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(
equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
>>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +SKIP
<Quantity -1. +1.22464680e-16j>
"""
# doctest skipped as the complex number formatting changed in numpy 1.14.
#
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
    Used to catch the errors involving scaled units,
    which are not recognized by the FITS format.
    """
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
    E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
    instance were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase(metaclass=InheritDocstrings):
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self).encode('unicode_escape')
def __str__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return 'Unit("{0}")'.format(string)
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
        unit = self.decompose()
        # bases and powers are already sorted in a unique way, so the
        # resulting tuple is deterministic without further sorting.
        r = tuple(zip([x.name for x in unit.bases], unit.powers))
        return r
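    # E.g. (a sketch): u.N.decompose() is "kg m / s2", so its physical type
    # id is (('kg', 1), ('m', 1), ('s', -2)): hashable and scale-free.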
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. "
"Perhaps you meant to_string()?")
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or `None`
Returns
-------
        A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when ``equivalencies=None``,
        in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __div__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rdiv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self**(-1))
except TypeError:
return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(">> is not implemented. Did you mean to convert "
"to a Quantity with unit {} using '<<'?".format(self),
AstropyWarning)
return NotImplemented
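    # Sketch of the ``<<`` operator above: ``[1., 2.] << u.m`` wraps the
    # data in a Quantity without copying, which makes ``<<`` the cheap way
    # to attach a unit, while ``>>`` deliberately only warns.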
def __hash__(self):
if self._hash is None:
parts = ([str(self.scale)] +
[x.name for x in self.bases] +
[str(x) for x in self.powers])
self._hash = hash(tuple(parts))
return self._hash
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is Unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1. or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1. or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : unit object or string or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other = Unit(other, parse_strict='silent')
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if (self._get_physical_type_id() ==
other._get_physical_type_id()):
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other/unit).decompose([a])
return True
except Exception:
pass
else:
                    if (a._is_equivalent(unit) and b._is_equivalent(other) or
                            b._is_equivalent(unit) and a._is_equivalent(other)):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
try:
ratio_in_funit = (other.decompose() /
unit.decompose()).decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string('unscaled')
physical_type = unit.physical_type
if physical_type != 'unknown':
unit_str = "'{0}' ({1})".format(
unit_str, physical_type)
else:
unit_str = "'{0}'".format(unit_str)
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(
"{0} and {1} are not convertible".format(
unit_str, other_str))
def _get_converter(self, other, equivalencies=[]):
other = Unit(other)
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies))
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, 'equivalencies'):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
return lambda v: b(self._get_converter(
tunit, equivalencies=equivalencies)(v))
except Exception:
pass
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
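        E.g. (a sketch): u.km._to(u.m) returns 1000.0, while u.km._to(u.s)
        raises UnitConversionError because the bases differ.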
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
            if (self_decomposed.powers == other_decomposed.powers and
                    all(self_base is other_base for (self_base, other_base)
                        in zip(self_decomposed.bases, other_decomposed.bases))):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(
"'{0!r}' is not a scaled version of '{1!r}'".format(self, other))
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit object or string
The unit to convert to.
value : scalar int or float, or sequence convertible to array, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(other, equivalencies=equivalencies)(value)
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(
other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it is not possible
            to do so.
Returns
-------
unit : CompositeUnit object
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
cached_results=None):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
for base in unit.bases:
if base not in namespace:
return False
return True
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [set([unit]), set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit ** power
tunit_decomposed = tunit_decomposed ** power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append(
(len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth, depth=depth + 1,
cached_results=cached_results)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append(
(len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
"Cannot represent unit {0} in terms of the given "
"units".format(self))
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(self, equivalencies=[], units=None, max_depth=2,
include_prefix_units=None):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of equivalence pairs, optional
            A list of equivalence pairs to also consider. See
:ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of units to compose to, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
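        Examples
        --------
        An illustrative sketch; the exact candidate list and its order
        may vary with the enabled unit registry and astropy version::
            >>> from astropy import units as u
            >>> u.Pa.compose()  # doctest: +SKIP
            [Unit("Pa"), Unit("1e-05 bar"), ...]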
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (isinstance(tunit, UnitBase) and
(include_prefix_units or
not isinstance(tunit, PrefixUnit)) and
has_bases_in_common_with_equiv(
decomposed, tunit.decompose())):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(
equivalencies=equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(self._compose(
equivalencies=equivalencies, namespace=units,
max_depth=max_depth, depth=0, cached_results={}))
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
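        Examples
        --------
        A sketch of converting a pressure unit to CGS (``Ba`` is the
        barye; the output shown is illustrative)::
            >>> from astropy import units as u
            >>> u.Pa.to_system(u.cgs)  # doctest: +SKIP
            [Unit("10 Ba")]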
"""
bases = set(system.bases)
        def score(compose):
            # If compose.bases is empty, return 'np.inf' as the score.
            # The exact value does not matter, since this case occurs
            # only for dimensionless quantities:
            compose_bases = compose.bases
            if len(compose_bases) == 0:
                return np.inf
            else:
                n_matching = 0
                for base in compose_bases:
                    if base in bases:
                        n_matching += 1
                return n_matching / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
        Return the physical type of the unit.
Examples
--------
>>> from astropy import units as u
>>> print(u.m.physical_type)
length
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to also pull options from.
See :ref:`unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(
unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(
unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
def __repr__(self):
if len(self) == 0:
return "[]"
else:
lines = []
for u in self:
irred = u.decompose().to_string()
if irred == u.name:
irred = "irreducible"
lines.append((u.name, irred, ', '.join(u.aliases)))
lines.sort()
lines.insert(0, ('Primary name', 'Unit definition', 'Aliases'))
widths = [0, 0, 0]
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = (lines[0:1] +
['['] +
['{0} ,'.format(x) for x in lines[1:]] +
[']'])
return '\n'.join(lines)
def find_equivalent_units(self, equivalencies=[], units=None,
include_prefix_units=False):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to also list. See
:ref:`unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of units to search in, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
            A list of unit objects equivalent to this unit. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
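        Examples
        --------
        Illustrative usage; the table is abbreviated here and its
        contents depend on the enabled unit registry::
            >>> from astropy import units as u
            >>> u.g.find_equivalent_units()  # doctest: +SKIP
              Primary name | Unit definition | Aliases
            [
              M_e          | 9.10938e-31 kg  |         ,
              ...
            ]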
"""
results = self.compose(
equivalencies=equivalencies, units=units, max_depth=1,
include_prefix_units=include_prefix_units)
results = set(
x.bases[0] for x in results if len(x.bases) == 1)
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError(
"st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return "{1} ({0})".format(*names[:2])
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
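        Examples
        --------
        An illustrative sketch, assuming the unit was defined with a
        ``latex`` entry in its ``format`` mapping::
            >>> from astropy import units as u
            >>> u.Ohm.get_format_name('latex')  # doctest: +SKIP
            '\\Omega'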
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
"Object with name {0!r} already exists in "
"given namespace ({1!r}).".format(
name, namespace[name]))
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__dict__)
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1],
_error_check=False)
raise UnitConversionError(
"Unit {0} can not be decomposed into the requested "
"bases".format(self))
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
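    Examples
    --------
    Instances typically arise from parsing an unknown string with
    ``parse_strict='silent'`` (illustrative)::
        >>> from astropy import units as u
        >>> u.Unit('foo', parse_strict='silent')  # doctest: +SKIP
        UnrecognizedUnit(foo)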
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return "UnrecognizedUnit({0})".format(str(self))
def __bytes__(self):
return self.name.encode('ascii', 'replace')
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
"The unit {0!r} is unrecognized, so all arithmetic operations "
"with it are invalid.".format(self.name))
__pow__ = __div__ = __rdiv__ = __truediv__ = __rtruediv__ = __mul__ = \
__rmul__ = __lt__ = __gt__ = __le__ = __ge__ = __neg__ = \
_unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
"The unit {0!r} is unrecognized. It can not be converted "
"to other units.".format(self.name))
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(InheritDocstrings):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(self, s, represents=None, format=None, namespace=None,
doc=None, parse_strict='raise'):
# Short-circuit if we're already a unit
if hasattr(s, '_get_physical_type_id'):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(represents.value *
represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode('ascii')
try:
return f.parse(s)
except Exception as e:
if parse_strict == 'silent':
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + ' '
else:
format_clause = ''
msg = ("'{0}' did not parse as {1}unit: {2}"
.format(s, format_clause, str(e)))
if parse_strict == 'raise':
raise ValueError(msg)
elif parse_strict == 'warn':
warnings.warn(msg, UnitsWarning)
else:
raise ValueError("'parse_strict' must be 'warn', "
"'raise' or 'silent'")
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError("{0} can not be converted to a Unit".format(s))
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
    There are a number of different ways to construct a Unit, but the
    constructor always returns a `UnitBase` instance. If the arguments
    refer to an already-existing unit, that existing unit instance is
    returned rather than a new one.
- From a string::
        Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From `None`::
Unit()
Returns the null unit.
    - The last form, which creates a new `Unit`, is described in detail
      below.
See also: http://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dictionary, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
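    Examples
    --------
    A minimal sketch of defining a new named unit; the name
    ``my_unit`` is purely illustrative::
        >>> from astropy import units as u
        >>> my_unit = u.Unit('my_unit', represents=1000.0 * u.m)  # doctest: +SKIP
        >>> my_unit.decompose()  # doctest: +SKIP
        Unit("1000 m")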
"""
def __init__(self, st, represents=None, doc=None,
format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc,
format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers,
_error_check=False)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
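    Examples
    --------
    Direct construction, shown only for illustration (prefer the
    `Unit` factory or arithmetic on existing units)::
        >>> from astropy import units as u
        >>> u.CompositeUnit(1.0, [u.m, u.s], [1, -1])  # doctest: +SKIP
        Unit("m / s")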
"""
_decomposed_cache = None
def __init__(self, scale, bases, powers, decompose=False,
decompose_bases=set(), _error_check=True):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError(
"bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale ** power
self._bases = unit.bases
self._powers = [operator.mul(*resolve_fractions(p, power))
for p in unit.powers]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose,
bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return 'Unit(dimensionless with a scale of {0})'.format(
self._scale)
else:
return 'Unit(dimensionless)'
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale ** p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', '')))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if (not isinstance(base, IrreducibleUnit) or
(len(bases) and base not in bases)):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True,
decompose_bases=bases)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
si_prefixes = [
(['Y'], ['yotta'], 1e24),
(['Z'], ['zetta'], 1e21),
(['E'], ['exa'], 1e18),
(['P'], ['peta'], 1e15),
(['T'], ['tera'], 1e12),
(['G'], ['giga'], 1e9),
(['M'], ['mega'], 1e6),
(['k'], ['kilo'], 1e3),
(['h'], ['hecto'], 1e2),
(['da'], ['deka', 'deca'], 1e1),
(['d'], ['deci'], 1e-1),
(['c'], ['centi'], 1e-2),
(['m'], ['milli'], 1e-3),
(['u'], ['micro'], 1e-6),
(['n'], ['nano'], 1e-9),
(['p'], ['pico'], 1e-12),
(['f'], ['femto'], 1e-15),
(['a'], ['atto'], 1e-18),
(['z'], ['zepto'], 1e-21),
(['y'], ['yocto'], 1e-24)
]
binary_prefixes = [
(['Ki'], ['kibi'], 2. ** 10),
(['Mi'], ['mebi'], 2. ** 20),
(['Gi'], ['gibi'], 2. ** 30),
(['Ti'], ['tebi'], 2. ** 40),
(['Pi'], ['pebi'], 2. ** 50),
(['Ei'], ['exbi'], 2. ** 60)
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == 'u':
format['latex'] = r'\mu ' + u.get_format_name('latex')
format['unicode'] = 'μ' + u.get_format_name('unicode')
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(names, CompositeUnit(factor, [u], [1],
_error_check=False),
namespace=namespace, format=format)
def def_unit(s, represents=None, doc=None, format=None, prefixes=False,
exclude_prefixes=[], namespace=None):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
            (short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `UnitBase` object
The newly-defined unit, or a matching unit that was already
defined.
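    Examples
    --------
    A minimal sketch defining a hypothetical unit; the name
    ``bakers_fortnight`` is made up for the example::
        >>> from astropy import units as u
        >>> bf = u.def_unit('bakers_fortnight', 13 * u.day)  # doctest: +SKIP
        >>> (2 * bf).to(u.day)  # doctest: +SKIP
        <Quantity 26. d>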
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc,
format=format)
else:
result = IrreducibleUnit(
s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(result, excludes=exclude_prefixes, namespace=namespace,
prefixes=prefixes)
return result
def _condition_arg(value):
"""
    Validate that a value is acceptable for conversion purposes.
    A non-scalar value is converted to a numpy array, provided such a
    conversion is possible.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
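    Examples
    --------
    A sequence is converted to an array (the exact repr depends on
    the numpy version)::
        >>> _condition_arg([1.5, 2.5])  # doctest: +SKIP
        array([1.5, 2.5])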
"""
if isinstance(value, (float, int, complex)):
return value
if isinstance(value, np.ndarray) and value.dtype.kind in ['i', 'f', 'c']:
return value
avalue = np.array(value)
if avalue.dtype.kind not in ['i', 'f', 'c']:
raise ValueError("Value not scalar compatible or convertible to "
"an int, float, or complex array")
return avalue
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
3ff609890b7da031588006f73180498236a1e9694f93e0540b21ca642bee2116 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<https://github.com/pynbody/pynbody>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
# Lots of things to import - go from more basic to advanced, so that
# whatever advanced ones need generally has been imported already;
# this helps prevent circular imports and makes it easier to understand
# where most time is spent (e.g., using python -X importtime).
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from . import photometric
from .function import units as function_units
from .si import *
from .astrophys import *
from .photometric import *
from .cgs import *
from .physical import *
from .function.units import *
from .equivalencies import *
from .function.core import *
from .function.logarithmic import *
from .function import magnitude_zero_points
from .decorators import *
del bases
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys, function_units, photometric])
|
2baa9af0522fc5d8e7b4f6d4fc65da5c6f11519ac28f9624f6a6b361720c73b2 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the astrophysics-specific units. They are also
available in the `astropy.units` namespace.
"""
from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
set_enabled_units)
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# LENGTH
def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True,
doc="astronomical unit: approximately the mean Earth--Sun "
"distance.")
def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True,
doc="parsec: approximately 3.26 light-years.")
def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns,
doc="Solar radius", prefixes=False,
format={'latex': r'R_{\odot}', 'unicode': 'R⊙'})
def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'],
_si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'R_{\rm J}', 'unicode': 'R♃'})
def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns,
prefixes=False, doc="Earth radius",
# LaTeX earth symbol requires wasysym
format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'})
def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m),
namespace=_ns, prefixes=True, doc="Light year")
###########################################################################
# AREAS
def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
doc="barn: unit of area used in HEP")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
namespace=_ns, prefixes=False,
doc="cycle: angular measurement, a full turn or rotation")
###########################################################################
# MASS
def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns,
prefixes=False, doc="Solar mass",
format={'latex': r'M_{\odot}', 'unicode': 'M⊙'})
def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'],
_si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'M_{\rm J}', 'unicode': 'M♃'})
def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns,
prefixes=False, doc="Earth mass",
# LaTeX earth symbol requires wasysym
format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'})
def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
prefixes=True, exclude_prefixes=['a', 'da'],
doc="Unified atomic mass unit")
##########################################################################
# ENERGY
# Here, explicitly convert the Planck constant to 'eV s' since the
# constant system can supply a more precise value in those units, one
# that takes into account covariances between e and h. Eventually, this
# may also be replaced with just `_si.Ryd.to(eV)`.
def_unit(['Ry', 'rydberg'],
(_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV),
namespace=_ns, prefixes=True,
doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg "
"constant",
format={'latex': r'R_{\infty}', 'unicode': 'R∞'})
###########################################################################
# ILLUMINATION
def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns,
prefixes=False, doc="Solar luminance",
format={'latex': r'L_{\odot}', 'unicode': 'L⊙'})
###########################################################################
# SPECTRAL DENSITY
def_unit((['ph', 'photon'], ['photon']),
format={'ogip': 'photon', 'vounit': 'photon'},
namespace=_ns, prefixes=True)
def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz,
namespace=_ns, prefixes=True,
doc="Jansky: spectral flux density")
def_unit(['R', 'Rayleigh', 'rayleigh'],
(1e10 / (4 * _numpy.pi)) *
ph * si.m ** -2 * si.s ** -1 * si.sr ** -1,
namespace=_ns, prefixes=True,
doc="Rayleigh: photon flux")
###########################################################################
# MISCELLANEOUS
# Some of these are very FITS-specific and perhaps considered a mistake.
# Maybe they should be moved into the FITS format class?
# TODO: This is defined by the FITS standard as "relative to the sun".
# Is that mass, volume, what?
def_unit(['Sun'], namespace=_ns)
###########################################################################
# EVENTS
def_unit((['ct', 'count'], ['count']),
format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'},
namespace=_ns, prefixes=True, exclude_prefixes=['p'])
def_unit((['pix', 'pixel'], ['pixel']),
format={'ogip': 'pixel', 'vounit': 'pixel'},
namespace=_ns, prefixes=True)
###########################################################################
# MISCELLANEOUS
def_unit(['chan'], namespace=_ns, prefixes=True)
def_unit(['bin'], namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
namespace=_ns, prefixes=True)
def_unit((['bit', 'b'], ['bit']), namespace=_ns,
prefixes=si_prefixes + binary_prefixes)
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
format={'vounit': 'byte'},
prefixes=si_prefixes + binary_prefixes,
exclude_prefixes=['d'])
def_unit(['adu'], namespace=_ns, prefixes=True)
def_unit(['beam'], namespace=_ns, prefixes=True)
def_unit(['electron'], doc="Number of electrons", namespace=_ns,
format={'latex': r'e^{-}', 'unicode': 'e⁻'})
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is apparently disallowed
def_unit(['littleh'], namespace=_ns, prefixes=False,
doc="Reduced/\"dimensionless\" Hubble constant",
format={'latex': r'h_{100}'})
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
c81ae5c11b1b65c93183b0749b2c7b30d7b236f01b378f4deed07dfad77cd736 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines physical unit names.
This module is not intended for use by user code directly. Instead,
the physical unit name of a `Unit` can be obtained using its
`physical_type` property.
"""
from . import core
from . import si
from . import astrophys
from . import cgs
from . import imperial
__all__ = ['def_physical_type', 'get_physical_type']
_physical_unit_mapping = {}
_unit_physical_mapping = {}
def def_physical_type(unit, name):
"""
Adds a new physical unit mapping.
Parameters
----------
unit : `~astropy.units.UnitBase` instance
The unit to map from.
name : str
The physical name of the unit.
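    Examples
    --------
    An illustrative sketch; the type name ``'volumetric flow'`` is
    made up for the example::
        >>> from astropy.units import si
        >>> def_physical_type(si.m ** 3 / si.s, 'volumetric flow')  # doctest: +SKIP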
"""
r = unit._get_physical_type_id()
if r in _physical_unit_mapping:
raise ValueError(
"{0!r} ({1!r}) already defined as {2!r}".format(
r, name, _physical_unit_mapping[r]))
_physical_unit_mapping[r] = name
_unit_physical_mapping[name] = r
def get_physical_type(unit):
"""
Given a unit, returns the name of the physical quantity it
represents. If it represents an unknown physical quantity,
``"unknown"`` is returned.
Parameters
----------
unit : `~astropy.units.UnitBase` instance
The unit to lookup
Returns
-------
physical : str
The name of the physical quantity, or unknown if not
known.
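    Examples
    --------
    Based on the mapping defined at the bottom of this module
    (illustrative)::
        >>> from astropy.units import si
        >>> get_physical_type(si.m / si.s)  # doctest: +SKIP
        'speed'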
"""
r = unit._get_physical_type_id()
return _physical_unit_mapping.get(r, 'unknown')
for unit, name in [
(core.Unit(1), 'dimensionless'),
(si.m, 'length'),
(si.m ** 2, 'area'),
(si.m ** 3, 'volume'),
(si.s, 'time'),
(si.rad, 'angle'),
(si.sr, 'solid angle'),
(si.m / si.s, 'speed'),
(si.m / si.s ** 2, 'acceleration'),
(si.Hz, 'frequency'),
(si.g, 'mass'),
(si.mol, 'amount of substance'),
(si.K, 'temperature'),
(si.deg_C, 'temperature'),
(imperial.deg_F, 'temperature'),
(si.N, 'force'),
(si.J, 'energy'),
(si.Pa, 'pressure'),
(si.W, 'power'),
(si.kg / si.m ** 3, 'mass density'),
(si.m ** 3 / si.kg, 'specific volume'),
    (si.mol / si.m ** 3, 'molar concentration'),
(si.kg * si.m / si.s, 'momentum/impulse'),
(si.kg * si.m ** 2 / si.s, 'angular momentum'),
(si.rad / si.s, 'angular speed'),
(si.rad / si.s ** 2, 'angular acceleration'),
(si.g / (si.m * si.s), 'dynamic viscosity'),
(si.m ** 2 / si.s, 'kinematic viscosity'),
(si.m ** -1, 'wavenumber'),
(si.A, 'electrical current'),
(si.C, 'electrical charge'),
(si.V, 'electrical potential'),
(si.Ohm, 'electrical resistance'),
(si.S, 'electrical conductance'),
(si.F, 'electrical capacitance'),
(si.C * si.m, 'electrical dipole moment'),
(si.A / si.m ** 2, 'electrical current density'),
(si.V / si.m, 'electrical field strength'),
(si.C / si.m ** 2, 'electrical flux density'),
(si.C / si.m ** 3, 'electrical charge density'),
(si.F / si.m, 'permittivity'),
(si.Wb, 'magnetic flux'),
(si.T, 'magnetic flux density'),
(si.A / si.m, 'magnetic field strength'),
    (si.H / si.m, 'magnetic permeability'),
(si.H, 'inductance'),
(si.cd, 'luminous intensity'),
(si.lm, 'luminous flux'),
(si.lx, 'luminous emittance/illuminance'),
(si.W / si.sr, 'radiant intensity'),
(si.cd / si.m ** 2, 'luminance'),
(astrophys.Jy, 'spectral flux density'),
(cgs.erg / si.angstrom / si.cm ** 2 / si.s, 'spectral flux density wav'),
(astrophys.photon / si.Hz / si.cm ** 2 / si.s, 'photon flux density'),
(astrophys.photon / si.AA / si.cm ** 2 / si.s, 'photon flux density wav'),
(astrophys.R, 'photon flux'),
(astrophys.bit, 'data quantity'),
(astrophys.bit / si.s, 'bandwidth'),
(cgs.Franklin, 'electrical charge (ESU)'),
(cgs.statampere, 'electrical current (ESU)'),
(cgs.Biot, 'electrical current (EMU)'),
(cgs.abcoulomb, 'electrical charge (EMU)')
]:
def_physical_type(unit, name)
|
7001ed0c5e0368d00525ede68ff523d3f5fa9c0eec674e764e2c5eb9ddeca43a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
import numbers
import io
import re
from fractions import Fraction
import numpy as np
from numpy import finfo
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace('\n', ' ')
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in namespace.items():
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ''
if isinstance(unit, core.Unit):
represents = ":math:`{0}`".format(
unit._represents.to_string('latex')[1:-1])
aliases = ', '.join('``{0}``'.format(x) for x in unit.aliases)
yield (unit, doc, represents, aliases, 'Yes' if unit.name in has_prefixes else 'No')
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
docstring = io.StringIO()
docstring.write("""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
""")
for unit_summary in _iter_unit_summary(namespace):
docstring.write("""
* - ``{0}``
- {1}
- {2}
- {3}
- {4}
""".format(*unit_summary))
return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace):
"""
Generates table entries for units in a namespace that are just prefixes
without the base unit. Note that this is intended to be used *after*
`generate_unit_summary` and therefore does not include the table header.
Parameters
----------
namespace : dict
A namespace containing units that are prefixes but do *not* have the
base unit in their namespace.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
from . import PrefixUnit
faux_namespace = {}
for nm, unit in namespace.items():
if isinstance(unit, PrefixUnit):
base_unit = unit.represents.bases[0]
faux_namespace[base_unit.name] = base_unit
docstring = io.StringIO()
for unit_summary in _iter_unit_summary(faux_namespace):
docstring.write("""
* - Prefixes for ``{0}``
- {1} prefixes
- {2}
- {3}
- Only
""".format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
_JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)
def sanitize_scale(scale):
if is_effectively_unity(scale):
return 1.0
# Maximum speed for regular case where scale is a float.
if scale.__class__ is float:
return scale
# All classes that scale can be (int, float, complex, Fraction)
# have an "imag" attribute.
if scale.imag:
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag/scale.real + 1):
return scale.real
elif is_effectively_unity(scale.real/scale.imag + 1):
return complex(0., scale.imag)
return scale
else:
return scale.real
def validate_power(p, support_tuples=False):
"""Convert a power to a floating point value, an integer, or a Fraction.
If a fractional power can be represented exactly as a floating point
number, convert it to a float, to make the math much faster; otherwise,
retain it as a `fractions.Fraction` object to avoid losing precision.
Conversely, if the value is indistinguishable from a rational number with a
low-numbered denominator, convert to a Fraction object.
Parameters
----------
p : float, int, Rational, Fraction
Power to be converted
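    Examples
    --------
    Illustrative behaviour of the main branches::
        >>> from fractions import Fraction
        >>> validate_power(2.0)
        2
        >>> validate_power(0.25)
        0.25
        >>> validate_power(Fraction(1, 3))
        Fraction(1, 3)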
"""
denom = getattr(p, 'denominator', None)
if denom is None:
try:
p = float(p)
except Exception:
if not np.isscalar(p):
raise ValueError("Quantities and Units may only be raised "
"to a scalar power")
else:
raise
if (p % 1.0) == 0.0:
# Denominators of 1 can just be integers.
p = int(p)
elif (p * 8.0) % 1.0 == 0.0:
# Leave alone if the denominator is exactly 2, 4 or 8, since this
# can be perfectly represented as a float, which means subsequent
# operations are much faster.
pass
else:
# Convert floats indistinguishable from a rational to Fraction.
# Here, we do not need to test values that are divisors of a higher
# number, such as 3, since it is already addressed by 6.
for i in (10, 9, 7, 6):
scaled = p * float(i)
            if ((scaled + 4. * _float_finfo.eps) % 1.0 <
                    8. * _float_finfo.eps):
p = Fraction(int(round(scaled)), i)
break
elif denom == 1:
p = int(p.numerator)
elif (denom & (denom - 1)) == 0:
# Above is a bit-twiddling hack to see if denom is a power of two.
p = float(p)
return p
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction.
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
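    Examples
    --------
    For example::
        >>> from fractions import Fraction
        >>> resolve_fractions(Fraction(1, 3), 2)
        (Fraction(1, 3), Fraction(2, 1))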
"""
# We short-circuit on the most common cases of int and float, since
# isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (a.__class__ is not int and a.__class__ is not float and
isinstance(a, Fraction))
b_is_fraction = (b.__class__ is not int and b.__class__ is not float and
isinstance(b, Fraction))
if a_is_fraction and not b_is_fraction:
b = Fraction(b)
elif not a_is_fraction and b_is_fraction:
a = Fraction(a)
return a, b
def quantity_asanyarray(a, dtype=None):
from .quantity import Quantity
if not isinstance(a, np.ndarray) and not np.isscalar(a) and any(isinstance(x, Quantity) for x in a):
return Quantity(a, dtype=dtype)
else:
return np.asanyarray(a, dtype=dtype)
|
41a0892ce50a32ce0cc6ff530616921594a75cf2f48c43fe4cd5f15c0ee45f1c | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitConversionError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from astropy.utils.compat import NUMPY_LT_1_14, NUMPY_LT_1_16
from astropy.utils.compat.misc import override__dir__
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable, InheritDocstrings
from astropy.utils.data_info import ParentDtypeInfo
from astropy import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return '{0.value:}'.format(val)
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Quantity (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class Quantity(np.ndarray, metaclass=InheritDocstrings):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: http://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See http://docs.astropy.org/en/latest/units/
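Examples
--------
A few illustrative constructions, matching the parameter descriptions
above (outputs are version-dependent and therefore skipped)::
>>> import astropy.units as u
>>> u.Quantity(2.5, u.m)  # doctest: +SKIP
>>> u.Quantity('2.5 m')  # doctest: +SKIP
>>> 2.5 * u.m  # doctest: +SKIP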
"""
# Need to set a class-level default for _equivalencies, or
# Constants cannot initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if dtype is None:
if not copy:
return value
if not np.can_cast(np.float32, value.dtype):
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{0}" as a {1}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif (isiterable(value) and len(value) > 0 and
all(isinstance(v, Quantity) for v in value)):
# Convert all quantities to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
if value_unit is None:
# If the value has a `unit` attribute and it is not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {0!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{1}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=False, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(() if value.ndim == 0 else 0),
numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and (not np.can_cast(np.float32, value.dtype)
or value.dtype.kind == 'O'):
value = value.astype(float)
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more, since we require '
'numpy >=1.13. Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
# Same for inputs, but here also convert if necessary.
arrays = [(converter(input_.value) if converter else
getattr(input_, 'value', input_))
for input_, converter in zip(inputs, converters)]
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
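# A hedged illustration of the flow above (not part of the original
# source): for ``np.sin(30. * u.deg)``, the converter rescales degrees
# to radian before the raw ufunc runs, and ``unit`` comes back as
# dimensionless, so the result is a dimensionless Quantity of ~0.5.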
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : `~numpy.ndarray` or tuple of `~numpy.ndarray`
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
return tuple(self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : `UnitBase`, or anything convertible to a :class:`~astropy.units.Unit`, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : Quantity subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalars in `.value`.)
# Note that for an ndarray input, the np.array call takes only about
# twice as long as the check ``obj.__class__ is np.ndarray``, so it is
# not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, UnitBase):
raise UnitTypeError(
"{0} instances require {1} units, not {2} instances."
.format(type(self).__name__, UnitBase, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
def to(self, unit, equivalencies=[]):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
See also
--------
to_value : get the numerical value in a given unit.
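Examples
--------
A minimal sketch (exact repr may vary between versions)::
>>> import astropy.units as u
>>> (2.5 * u.m).to(u.cm)  # doctest: +SKIP
<Quantity 250. cm>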
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
return self._new_view(self._to_value(unit, equivalencies), unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If not provided or
``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : `~numpy.ndarray` or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
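Examples
--------
A minimal sketch::
>>> import astropy.units as u
>>> (2.5 * u.m).to_value(u.cm)  # doctest: +SKIP
250.0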
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
else:
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
return value if self.shape else (value[()] if self.dtype.fields
else value.item())
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
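# A hedged illustration (not in the original source): for ``q = 1. * u.km``,
# ``q.si`` is the quantity 1000. m and ``q.cgs`` is 100000. cm; the value
# is rescaled so that the returned unit carries no hidden scale factor.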
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
"'{0}' object has no '{1}' member".format(
self.__class__.__name__,
attr))
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
"{0} instance has no attribute '{1}'".format(
self.__class__.__name__, attr))
else:
return value
# Equality (return False if units do not match) needs to be handled
# explicitly for numpy >=1.9, since it no longer traps errors.
def __eq__(self, other):
try:
try:
return super().__eq__(other)
except DeprecationWarning:
# We treat the DeprecationWarning separately, since it may
# mask another Exception. But we do not want to just use
# np.equal, since super's __eq__ treats recarrays correctly.
return np.equal(self, other)
except UnitsError:
return False
except TypeError:
return NotImplemented
def __ne__(self, other):
try:
try:
return super().__ne__(other)
except DeprecationWarning:
return np.not_equal(self, other)
except UnitsError:
return True
except TypeError:
return NotImplemented
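# A hedged illustration of the semantics above (not from the original
# source): ``(1. * u.m) == (1. * u.s)`` raises UnitsError internally and
# thus evaluates to False, while ``(1. * u.m) == (100. * u.cm)`` converts
# first and evaluates to True.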
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
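# A hedged usage sketch (not part of the original source): ``q << u.cm``
# returns ``q`` expressed in cm (avoiding a copy where none is needed),
# while ``q <<= u.cm`` converts in place via ``__ilshift__`` below.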
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
try:
factor = self.unit._to(other)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] *= factor
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __div__(self, other):
""" Division between `Quantity` objects. """
return self.__truediv__(other)
def __idiv__(self, other):
""" Division between `Quantity` objects. """
return self.__itruediv__(other)
def __rdiv__(self, other):
""" Division between `Quantity` objects. """
return self.__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
# For Py>=3.5
if NUMPY_LT_1_16:
def __matmul__(self, other):
result_unit = self.unit * getattr(other, 'unit',
dimensionless_unscaled)
result_array = np.matmul(self.value,
getattr(other, 'value', other))
return self._new_view(result_array, result_unit)
def __rmatmul__(self, other):
result_unit = self.unit * getattr(other, 'unit',
dimensionless_unscaled)
result_array = np.matmul(getattr(other, 'value', other),
self.value)
return self._new_view(result_array, result_unit)
# In numpy 1.13, 1.14, a np.positive ufunc exists, but ndarray.__pos__
# does not go through it, so we define it, to allow subclasses to override
# it inside __array_ufunc__. This can be removed if a solution to
# https://github.com/numpy/numpy/issues/9081 is merged.
def __pos__(self):
"""Plus the quantity."""
return np.positive(self)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : numeric, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
subfmt : str, optional
Subformat of the result. For the moment,
only used for format="latex". Supported values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
lstr
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
if format not in formats:
raise ValueError("Unknown format '{0}'".format(format))
elif format is None:
return '{0}{1:s}'.format(self.value, self._unitstr)
# else, for the moment we assume format="latex"
# need to do try/finally because "threshold" cannot be overridden
# with array2string
pops = np.get_printoptions()
format_spec = '.{}g'.format(
precision if precision is not None else pops['precision'])
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
def complex_formatter(value):
return '({0}{1}i)'.format(
Latex.format_exponential_notation(value.real,
format_spec=format_spec),
Latex.format_exponential_notation(value.imag,
format_spec='+' + format_spec))
try:
formatter = {'float_kind': float_formatter,
'complex_kind': complex_formatter}
if conf.latex_array_threshold > -1:
np.set_printoptions(threshold=conf.latex_array_threshold,
formatter=formatter)
# the view is needed for the scalar case - value might be float
if NUMPY_LT_1_14: # style deprecated in 1.14
latex_value = np.array2string(
self.view(np.ndarray),
style=(float_formatter if self.dtype.kind == 'f'
else complex_formatter if self.dtype.kind == 'c'
else repr),
max_line_width=np.inf, separator=',~')
else:
latex_value = np.array2string(
self.view(np.ndarray),
max_line_width=np.inf, separator=',~')
latex_value = latex_value.replace('...', r'\dots')
finally:
np.set_printoptions(**pops)
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode
if self.unit is not None
else _UNIT_NOT_INITIALISED)
delimiter_left, delimiter_right = formats[format][subfmt]
return r'{left}{0} \; {1}{right}'.format(latex_value, latex_unit,
left=delimiter_left,
right=delimiter_right)
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
sep = ',' if NUMPY_LT_1_14 else ', '
arrstr = np.array2string(self.view(np.ndarray), separator=sep,
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format("{0}{1:s}".format(value, self._unitstr),
full_format_spec)
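# A hedged illustration (not from the original source): the spec is first
# tried on the value alone, so ``format(3.5 * u.m, '.2f')`` yields
# '3.50 m'; a spec the value rejects is applied to the combined string.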
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
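Examples
--------
A minimal sketch (exact repr may vary between versions)::
>>> import astropy.units as u
>>> (1. * u.J).decompose()  # doctest: +SKIP
<Quantity 1. kg m2 / s2>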
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.list()")
def _to_own_unit(self, value, check_precision=True):
try:
_value = value.to_value(self.unit)
except AttributeError:
# We're not a Quantity, so let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
as_quantity = Quantity(value)
try:
_value = as_quantity.to_value(self.unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(as_quantity.value)):
_value = as_quantity.value
else:
raise
if check_precision:
# If, e.g., we are casting double to float, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype)
if not np.all(np.logical_or(self_dtype_array == _value,
np.isnan(_value))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof)
def mean(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.mean, axis, dtype, out=out)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{0} instances require units equivalent to '{1}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
", so cannot set it to '{0}'.".format(unit)))
super()._set_unit(unit)
def isclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`.
"""
return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`.
"""
return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
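# A hedged usage sketch (not part of the original source): the helper
# below makes e.g. ``allclose([1., 2.] * u.m, [100., 200.] * u.cm)``
# evaluate to True, after stripping everything to a common unit.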
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(desired.unit, actual.unit))
if atol is None:
# by default, we assume an absolute tolerance of 0
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(atol.unit, actual.unit))
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
94fb4b2b1c17694b8ac4c2afcafad551bda002ea76aba5e349a02b5431179522 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
from astropy.constants import si as _si
from .core import UnitBase, Unit, def_unit
import numpy as _numpy
_ns = globals()
###########################################################################
# DIMENSIONLESS
def_unit(['percent', 'pct'], Unit(0.01), namespace=_ns, prefixes=False,
doc="percent: one hundredth of unity, factor 0.01",
format={'generic': '%', 'console': '%', 'cds': '%',
'latex': r'\%', 'unicode': '%'})
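# A hedged illustration (not part of the original source): ``def_unit``
# injects the names above into this module's namespace, so ``percent``
# is now usable directly; e.g. 25 percent converts to the dimensionless
# fraction 0.25.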
###########################################################################
# LENGTH
def_unit(['m', 'meter'], namespace=_ns, prefixes=True,
doc="meter: base unit of length in SI")
def_unit(['micron'], um, namespace=_ns,
doc="micron: alias for micrometer (um)",
format={'latex': r'\mu m', 'unicode': 'μm'})
def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns,
doc="ångström: 10 ** -10 m",
format={'latex': r'\mathring{A}', 'unicode': 'Å',
'vounit': 'Angstrom'})
###########################################################################
# VOLUMES
def_unit((['l', 'L'], ['liter']), 1000 * cm ** 3.0, namespace=_ns, prefixes=True,
format={'latex': r'\mathcal{l}', 'unicode': 'ℓ'},
doc="liter: metric unit of volume")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['rad', 'radian'], namespace=_ns, prefixes=True,
doc="radian: angular measurement of the ratio between the length "
"on an arc and its radius")
def_unit(['deg', 'degree'], _numpy.pi / 180.0 * rad, namespace=_ns,
prefixes=True,
doc="degree: angular measurement 1/360 of full rotation",
format={'latex': r'{}^{\circ}', 'unicode': '°'})
def_unit(['hourangle'], 15.0 * deg, namespace=_ns, prefixes=False,
doc="hour angle: angular measurement with 24 in a full circle",
format={'latex': r'{}^{h}', 'unicode': 'ʰ'})
def_unit(['arcmin', 'arcminute'], 1.0 / 60.0 * deg, namespace=_ns,
prefixes=True,
doc="arc minute: angular measurement",
format={'latex': r'{}^{\prime}', 'unicode': '′'})
def_unit(['arcsec', 'arcsecond'], 1.0 / 3600.0 * deg, namespace=_ns,
prefixes=True,
doc="arc second: angular measurement")
# These special formats should only be used for the non-prefix versions
arcsec._format = {'latex': r'{}^{\prime\prime}', 'unicode': '″'}
def_unit(['mas'], 0.001 * arcsec, namespace=_ns,
doc="milli arc second: angular measurement")
def_unit(['uas'], 0.000001 * arcsec, namespace=_ns,
doc="micro arc second: angular measurement",
format={'latex': r'\mu as', 'unicode': 'μas'})
def_unit(['sr', 'steradian'], rad ** 2, namespace=_ns, prefixes=True,
doc="steradian: unit of solid angle in SI")
###########################################################################
# TIME
def_unit(['s', 'second'], namespace=_ns, prefixes=True,
exclude_prefixes=['a'],
doc="second: base unit of time in SI.")
def_unit(['min', 'minute'], 60 * s, prefixes=True, namespace=_ns)
def_unit(['h', 'hour', 'hr'], 3600 * s, namespace=_ns, prefixes=True,
exclude_prefixes=['p'])
def_unit(['d', 'day'], 24 * h, namespace=_ns, prefixes=True,
exclude_prefixes=['c', 'y'])
def_unit(['sday'], 86164.09053 * s, namespace=_ns,
doc="Sidereal day (sday) is the time of one rotation of the Earth.")
def_unit(['wk', 'week'], 7 * day, namespace=_ns)
def_unit(['fortnight'], 2 * wk, namespace=_ns)
def_unit(['a', 'annum'], 365.25 * d, namespace=_ns, prefixes=True,
exclude_prefixes=['P'])
def_unit(['yr', 'year'], 365.25 * d, namespace=_ns, prefixes=True)
###########################################################################
# FREQUENCY
def_unit(['Hz', 'Hertz', 'hertz'], 1 / s, namespace=_ns, prefixes=True,
doc="Frequency")
###########################################################################
# MASS
def_unit(['kg', 'kilogram'], namespace=_ns,
doc="kilogram: base unit of mass in SI.")
def_unit(['g', 'gram'], 1.0e-3 * kg, namespace=_ns, prefixes=True,
exclude_prefixes=['k', 'kilo'])
def_unit(['t', 'tonne'], 1000 * kg, namespace=_ns,
doc="Metric tonne")
###########################################################################
# AMOUNT OF SUBSTANCE
def_unit(['mol', 'mole'], namespace=_ns, prefixes=True,
doc="mole: amount of a chemical substance in SI.")
###########################################################################
# TEMPERATURE
def_unit(
['K', 'Kelvin'], namespace=_ns, prefixes=True,
doc="Kelvin: temperature with a null point at absolute zero.")
def_unit(
['deg_C', 'Celsius'], namespace=_ns, doc='Degrees Celsius',
format={'latex': r'{}^{\circ}C', 'unicode': '°C'})
###########################################################################
# FORCE
def_unit(['N', 'Newton', 'newton'], kg * m * s ** -2, namespace=_ns,
prefixes=True, doc="Newton: force")
##########################################################################
# ENERGY
def_unit(['J', 'Joule', 'joule'], N * m, namespace=_ns, prefixes=True,
doc="Joule: energy")
def_unit(['eV', 'electronvolt'], _si.e.value * J, namespace=_ns, prefixes=True,
doc="Electron Volt")
##########################################################################
# PRESSURE
def_unit(['Pa', 'Pascal', 'pascal'], J * m ** -3, namespace=_ns, prefixes=True,
doc="Pascal: pressure")
def_unit(['bar'], 1e5 * Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="bar: pressure")
###########################################################################
# POWER
def_unit(['W', 'Watt', 'watt'], J / s, namespace=_ns, prefixes=True,
doc="Watt: power")
###########################################################################
# ELECTRICAL
def_unit(['A', 'ampere', 'amp'], namespace=_ns, prefixes=True,
doc="ampere: base unit of electric current in SI")
def_unit(['C', 'coulomb'], A * s, namespace=_ns, prefixes=True,
doc="coulomb: electric charge")
def_unit(['V', 'Volt', 'volt'], J * C ** -1, namespace=_ns, prefixes=True,
doc="Volt: electric potential or electromotive force")
def_unit((['Ohm', 'ohm'], ['Ohm']), V * A ** -1, namespace=_ns, prefixes=True,
doc="Ohm: electrical resistance",
format={'latex': r'\Omega', 'unicode': 'Ω'})
def_unit(['S', 'Siemens', 'siemens'], A * V ** -1, namespace=_ns,
prefixes=True, doc="Siemens: electrical conductance")
def_unit(['F', 'Farad', 'farad'], C * V ** -1, namespace=_ns, prefixes=True,
doc="Farad: electrical capacitance")
###########################################################################
# MAGNETIC
def_unit(['Wb', 'Weber', 'weber'], V * s, namespace=_ns, prefixes=True,
doc="Weber: magnetic flux")
def_unit(['T', 'Tesla', 'tesla'], Wb * m ** -2, namespace=_ns, prefixes=True,
doc="Tesla: magnetic flux density")
def_unit(['H', 'Henry', 'henry'], Wb * A ** -1, namespace=_ns, prefixes=True,
doc="Henry: inductance")
###########################################################################
# ILLUMINATION
def_unit(['cd', 'candela'], namespace=_ns, prefixes=True,
doc="candela: base unit of luminous intensity in SI")
def_unit(['lm', 'lumen'], cd * sr, namespace=_ns, prefixes=True,
doc="lumen: luminous flux")
def_unit(['lx', 'lux'], lm * m ** -2, namespace=_ns, prefixes=True,
doc="lux: luminous emittance")
###########################################################################
# RADIOACTIVITY
def_unit(['Bq', 'becquerel'], Hz, namespace=_ns, prefixes=False,
doc="becquerel: unit of radioactivity")
def_unit(['Ci', 'curie'], Bq * 3.7e10, namespace=_ns, prefixes=False,
doc="curie: unit of radioactivity")
###########################################################################
# BASES
bases = set([m, s, kg, A, cd, rad, K, mol])
###########################################################################
# CLEANUP
del UnitBase
del Unit
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
7ab4b1ed2cda7ce8f24b0529f765799a91f51b9a6252e2b547d71dc78f4b0909 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A set of standard astronomical equivalencies."""
from collections import UserList
# THIRD-PARTY
import numpy as np
import warnings
# LOCAL
from astropy.constants import si as _si
from astropy.utils.misc import isiterable
from . import si
from . import cgs
from . import astrophys
from .function import units as function_units
from . import dimensionless_unscaled
from .core import UnitsError, Unit
__all__ = ['parallax', 'spectral', 'spectral_density', 'doppler_radio',
'doppler_optical', 'doppler_relativistic', 'mass_energy',
'brightness_temperature', 'thermodynamic_temperature',
'beam_angular_area', 'dimensionless_angles', 'logarithmic',
'temperature', 'temperature_energy', 'molar_mass_amu',
'pixel_scale', 'plate_scale', 'with_H0']
class Equivalency(UserList):
"""
A container for a units equivalency.
Attributes
----------
name : str
The name of the equivalency.
kwargs : dict
Any positional or keyword arguments used to make the equivalency.
"""
def __init__(self, equiv_list, name='', kwargs=None):
self.data = equiv_list
self.name = [name]
self.kwargs = [kwargs] if kwargs is not None else [dict()]
def __add__(self, other):
if isinstance(other, Equivalency):
new = super().__add__(other)
new.name = self.name[:] + other.name
new.kwargs = self.kwargs[:] + other.kwargs
return new
else:
return self.data.__add__(other)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.name == other.name and
self.kwargs == other.kwargs)
def dimensionless_angles():
"""Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1).
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the angle is raised,
and independent of whether it is part of a more complicated unit.
"""
return Equivalency([(si.radian, None)], "dimensionless_angles")
def logarithmic():
"""Allow logarithmic units to be converted to dimensionless fractions"""
return Equivalency([
(dimensionless_unscaled, function_units.dex,
np.log10, lambda x: 10.**x)
], "logarithmic")
def parallax():
"""
Returns a list of equivalence pairs that handle the conversion
between parallax angle and distance.
"""
def parallax_converter(x):
x = np.asanyarray(x)
d = 1 / x
if isiterable(d):
d[d < 0] = np.nan
return d
else:
if d < 0:
return np.array(np.nan)
else:
return d
return Equivalency([
(si.arcsecond, astrophys.parsec, parallax_converter)
], "parallax")
def spectral():
"""
Returns a list of equivalence pairs that handle spectral
wavelength, wave number, frequency, and energy equivalences.
Allows conversions between wavelength units, wave number units,
frequency units, and energy units as they relate to light.
There are two types of wave number:
* spectroscopic - :math:`1 / \\lambda` (per meter)
* angular - :math:`2 \\pi / \\lambda` (radian per meter)
"""
hc = _si.h.value * _si.c.value
two_pi = 2.0 * np.pi
inv_m_spec = si.m ** -1
inv_m_ang = si.radian / si.m
return Equivalency([
(si.m, si.Hz, lambda x: _si.c.value / x),
(si.m, si.J, lambda x: hc / x),
(si.Hz, si.J, lambda x: _si.h.value * x, lambda x: x / _si.h.value),
(si.m, inv_m_spec, lambda x: 1.0 / x),
(si.Hz, inv_m_spec, lambda x: x / _si.c.value,
lambda x: _si.c.value * x),
(si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),
(inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),
(si.m, inv_m_ang, lambda x: two_pi / x),
(si.Hz, inv_m_ang, lambda x: two_pi * x / _si.c.value,
lambda x: _si.c.value * x / two_pi),
(si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi)
], "spectral")
def spectral_density(wav, factor=None):
"""
Returns a list of equivalence pairs that handle spectral density
with regard to wavelength and frequency.
Parameters
----------
    wav : `~astropy.units.Quantity`
        `~astropy.units.Quantity` associated with values being converted
        (e.g., wavelength or frequency).
    factor : array_like, optional
        If ``wav`` is given as a unit rather than a quantity, ``factor`` is
        the value by which that unit is multiplied to form the quantity
        (see Notes).
Notes
-----
The ``factor`` argument is left for backward-compatibility with the syntax
``spectral_density(unit, factor)`` but users are encouraged to use
``spectral_density(factor * unit)`` instead.
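    Examples
    --------
    A sketch of typical use, converting :math:`f_\\lambda` to :math:`f_\\nu`
    at a hypothetical wavelength of 1 micron::
        >>> import astropy.units as u
        >>> f_la = 1e-13 * u.erg / u.AA / u.cm**2 / u.s
        >>> f_la.to(u.erg / u.Hz / u.cm**2 / u.s,
        ...         equivalencies=u.spectral_density(1 * u.um))  # doctest: +FLOAT_CMP
        <Quantity 3.33564095e-24 erg / (cm2 Hz s)>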
"""
from .core import UnitBase
if isinstance(wav, UnitBase):
if factor is None:
raise ValueError(
'If `wav` is specified as a unit, `factor` should be set')
wav = factor * wav # Convert to Quantity
c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s
h_cgs = _si.h.cgs.value # erg * s
hc = c_Aps * h_cgs
# flux density
f_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s
f_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s
nu_f_nu = cgs.erg / si.cm ** 2 / si.s
la_f_la = nu_f_nu
phot_f_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA)
phot_f_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz)
# luminosity density
L_nu = cgs.erg / si.s / si.Hz
L_la = cgs.erg / si.s / si.angstrom
nu_L_nu = cgs.erg / si.s
la_L_la = nu_L_nu
phot_L_la = astrophys.photon / (si.s * si.AA)
phot_L_nu = astrophys.photon / (si.s * si.Hz)
def converter(x):
return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def iconverter(x):
return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def converter_f_nu_to_nu_f_nu(x):
return x * wav.to_value(si.Hz, spectral())
def iconverter_f_nu_to_nu_f_nu(x):
return x / wav.to_value(si.Hz, spectral())
def converter_f_la_to_la_f_la(x):
return x * wav.to_value(si.AA, spectral())
def iconverter_f_la_to_la_f_la(x):
return x / wav.to_value(si.AA, spectral())
def converter_phot_f_la_to_f_la(x):
return hc * x / wav.to_value(si.AA, spectral())
def iconverter_phot_f_la_to_f_la(x):
return x * wav.to_value(si.AA, spectral()) / hc
def converter_phot_f_la_to_f_nu(x):
return h_cgs * x * wav.to_value(si.AA, spectral())
def iconverter_phot_f_la_to_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) * h_cgs)
def converter_phot_f_la_phot_f_nu(x):
return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps
def iconverter_phot_f_la_phot_f_nu(x):
return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2
converter_phot_f_nu_to_f_nu = converter_phot_f_la_to_f_la
iconverter_phot_f_nu_to_f_nu = iconverter_phot_f_la_to_f_la
def converter_phot_f_nu_to_f_la(x):
return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3
def iconverter_phot_f_nu_to_f_la(x):
return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)
# for luminosity density
converter_L_nu_to_nu_L_nu = converter_f_nu_to_nu_f_nu
iconverter_L_nu_to_nu_L_nu = iconverter_f_nu_to_nu_f_nu
converter_L_la_to_la_L_la = converter_f_la_to_la_f_la
iconverter_L_la_to_la_L_la = iconverter_f_la_to_la_f_la
converter_phot_L_la_to_L_la = converter_phot_f_la_to_f_la
iconverter_phot_L_la_to_L_la = iconverter_phot_f_la_to_f_la
converter_phot_L_la_to_L_nu = converter_phot_f_la_to_f_nu
iconverter_phot_L_la_to_L_nu = iconverter_phot_f_la_to_f_nu
converter_phot_L_la_phot_L_nu = converter_phot_f_la_phot_f_nu
iconverter_phot_L_la_phot_L_nu = iconverter_phot_f_la_phot_f_nu
converter_phot_L_nu_to_L_nu = converter_phot_f_nu_to_f_nu
iconverter_phot_L_nu_to_L_nu = iconverter_phot_f_nu_to_f_nu
converter_phot_L_nu_to_L_la = converter_phot_f_nu_to_f_la
iconverter_phot_L_nu_to_L_la = iconverter_phot_f_nu_to_f_la
return Equivalency([
# flux
(f_la, f_nu, converter, iconverter),
(f_nu, nu_f_nu, converter_f_nu_to_nu_f_nu, iconverter_f_nu_to_nu_f_nu),
(f_la, la_f_la, converter_f_la_to_la_f_la, iconverter_f_la_to_la_f_la),
(phot_f_la, f_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la),
(phot_f_la, f_nu, converter_phot_f_la_to_f_nu, iconverter_phot_f_la_to_f_nu),
(phot_f_la, phot_f_nu, converter_phot_f_la_phot_f_nu, iconverter_phot_f_la_phot_f_nu),
(phot_f_nu, f_nu, converter_phot_f_nu_to_f_nu, iconverter_phot_f_nu_to_f_nu),
(phot_f_nu, f_la, converter_phot_f_nu_to_f_la, iconverter_phot_f_nu_to_f_la),
# luminosity
(L_la, L_nu, converter, iconverter),
(L_nu, nu_L_nu, converter_L_nu_to_nu_L_nu, iconverter_L_nu_to_nu_L_nu),
(L_la, la_L_la, converter_L_la_to_la_L_la, iconverter_L_la_to_la_L_la),
(phot_L_la, L_la, converter_phot_L_la_to_L_la, iconverter_phot_L_la_to_L_la),
(phot_L_la, L_nu, converter_phot_L_la_to_L_nu, iconverter_phot_L_la_to_L_nu),
(phot_L_la, phot_L_nu, converter_phot_L_la_phot_L_nu, iconverter_phot_L_la_phot_L_nu),
(phot_L_nu, L_nu, converter_phot_L_nu_to_L_nu, iconverter_phot_L_nu_to_L_nu),
(phot_L_nu, L_la, converter_phot_L_nu_to_L_la, iconverter_phot_L_nu_to_L_la),
], "spectral_density", {'wav': wav, 'factor': factor})
def doppler_radio(rest):
r"""
Return the equivalency pairs for the radio convention for velocity.
The radio convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> radio_CO_equiv = u.doppler_radio(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv)
>>> radio_velocity # doctest: +FLOAT_CMP
<Quantity -31.209092088877583 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value('km/s')
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq-x) / (restfreq) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x/ckms
return restfreq * (1-voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x-restwav) / (x) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return restwav * ckms / (ckms-x)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return (resten-x) / (resten) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x/ckms
return resten * (1-voverc)
return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km/si.s, to_vel_en, from_vel_en),
], "doppler_radio", {'rest': rest})
def doppler_optical(rest):
r"""
Return the equivalency pairs for the optical convention for velocity.
The optical convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> optical_CO_equiv = u.doppler_optical(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv)
>>> optical_velocity # doctest: +FLOAT_CMP
<Quantity -31.20584348799674 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value('km/s')
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return ckms * (restfreq-x) / x
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x/ckms
return restfreq / (1+voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return ckms * (x/restwav-1)
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x/ckms
return restwav * (1+voverc)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return ckms * (resten-x) / x
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x/ckms
return resten / (1+voverc)
return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km/si.s, to_vel_en, from_vel_en),
], "doppler_optical", {'rest': rest})
def doppler_relativistic(rest):
r"""
Return the equivalency pairs for the relativistic convention for velocity.
The full relativistic convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv)
>>> relativistic_velocity # doctest: +FLOAT_CMP
<Quantity -31.207467619351537 km / s>
>>> measured_velocity = 1250 * u.km/u.s
>>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv)
>>> relativistic_frequency # doctest: +FLOAT_CMP
<Quantity 114.79156866993588 GHz>
>>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv)
>>> relativistic_wavelength # doctest: +FLOAT_CMP
<Quantity 2.6116243681798923 mm>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value('km/s')
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq**2-x**2) / (restfreq**2+x**2) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x/ckms
return restfreq * ((1-voverc) / (1+(voverc)))**0.5
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x**2-restwav**2) / (restwav**2+x**2) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x/ckms
return restwav * ((1+voverc) / (1-voverc))**0.5
def to_vel_en(x):
resten = rest.to_value(si.eV, spectral())
return (resten**2-x**2) / (resten**2+x**2) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, spectral())
voverc = x/ckms
return resten * ((1-voverc) / (1+(voverc)))**0.5
return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km/si.s, to_vel_en, from_vel_en),
], "doppler_relativistic", {'rest': rest})
def molar_mass_amu():
"""
Returns the equivalence between amu and molar mass.
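    Examples
    --------
    A minimal illustration (hypothetical molar mass)::
        >>> import astropy.units as u
        >>> (12 * u.g / u.mol).to(u.u, equivalencies=u.molar_mass_amu())  # doctest: +FLOAT_CMP
        <Quantity 12. u>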
"""
return Equivalency([
(si.g/si.mol, astrophys.u)
], "molar_mass_amu")
def mass_energy():
"""
Returns a list of equivalence pairs that handle the conversion
between mass and energy.
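    Examples
    --------
    A minimal illustration (:math:`E = m c^2` for a hypothetical 1 kg mass)::
        >>> import astropy.units as u
        >>> (1 * u.kg).to(u.J, equivalencies=u.mass_energy())  # doctest: +FLOAT_CMP
        <Quantity 8.98755179e+16 J>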
"""
return Equivalency([(si.kg, si.J, lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
(si.kg / si.m ** 2, si.J / si.m ** 2,
lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
(si.kg / si.m ** 3, si.J / si.m ** 3,
lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
(si.kg / si.s, si.J / si.s, lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
], "mass_energy")
def brightness_temperature(frequency, beam_area=None):
r"""
Defines the conversion between Jy/sr and "brightness temperature",
:math:`T_B`, in Kelvins. The brightness temperature is a unit very
commonly used in radio astronomy. See, e.g., "Tools of Radio Astronomy"
(Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google
books
<http://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__).
:math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)`
If the input is in Jy/beam or Jy (assuming it came from a single beam), the
beam area is essential for this computation: the brightness temperature is
inversely proportional to the beam area.
Parameters
----------
frequency : `~astropy.units.Quantity` with spectral units
The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). The variable is named 'frequency' because it
is more commonly used in radio astronomy.
BACKWARD COMPATIBILITY NOTE: previous versions of the brightness
temperature equivalency used the keyword ``disp``, which is no longer
supported.
beam_area : angular area equivalent
Beam area in angular units, i.e. steradian equivalent
Examples
--------
Arecibo C-band beam::
>>> import numpy as np
>>> from astropy import units as u
>>> beam_sigma = 50*u.arcsec
>>> beam_area = 2*np.pi*(beam_sigma)**2
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 3.526295144567176 K>
VLA synthetic beam::
>>> bmaj = 15*u.arcsec
>>> bmin = 15*u.arcsec
>>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
>>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2)
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 217.2658703625732 K>
    Any generic surface brightness::
>>> surf_brightness = 1e6*u.MJy/u.sr
>>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP
<Quantity 130.1931904778803 K>
"""
if frequency.unit.is_equivalent(si.sr):
if not beam_area.unit.is_equivalent(si.Hz):
raise ValueError("The inputs to `brightness_temperature` are "
"frequency and angular area.")
warnings.warn("The inputs to `brightness_temperature` have changed. "
"Frequency is now the first input, and angular area "
"is the second, optional input.",
DeprecationWarning)
frequency, beam_area = beam_area, frequency
nu = frequency.to(si.GHz, spectral())
if beam_area is not None:
beam = beam_area.to_value(si.sr)
def convert_Jy_to_K(x_jybm):
factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
return (x_jybm / beam / factor)
def convert_K_to_Jy(x_K):
factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
return (x_K * beam / factor)
return Equivalency([(astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),
(astrophys.Jy/astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),],
"brightness_temperature", {'frequency': frequency, 'beam_area': beam_area})
else:
def convert_JySr_to_K(x_jysr):
factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
return (x_jysr / factor)
def convert_K_to_JySr(x_K):
factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
return (x_K / factor) # multiplied by 1x for 1 steradian
return Equivalency([(astrophys.Jy/si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],
"brightness_temperature", {'frequency': frequency, 'beam_area': beam_area})
def beam_angular_area(beam_area):
"""
Convert between the ``beam`` unit, which is commonly used to express the area
of a radio telescope resolution element, and an area on the sky.
This equivalency also supports direct conversion between ``Jy/beam`` and
``Jy/steradian`` units, since that is a common operation.
Parameters
----------
beam_area : angular area equivalent
The area of the beam in angular area units (e.g., steradians)
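    Examples
    --------
    A minimal illustration (hypothetical beam area)::
        >>> import astropy.units as u
        >>> equiv = u.beam_angular_area(1e-5 * u.sr)
        >>> (1 * u.Jy / u.beam).to(u.Jy / u.sr, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 100000. Jy / sr>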
"""
return Equivalency([(astrophys.beam, Unit(beam_area)),
(astrophys.beam**-1, Unit(beam_area)**-1),
(astrophys.Jy/astrophys.beam, astrophys.Jy/Unit(beam_area)),],
"beam_angular_area", {'beam_area': beam_area})
def thermodynamic_temperature(frequency, T_cmb=None):
r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
:math:`T_{CMB}`, in Kelvins. The thermodynamic temperature is a unit very
    commonly used in cosmology. See eqn 8 in [1]_.
:math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2 f(\nu) \right)`
with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`
where :math:`x = h \nu / k T`
Parameters
----------
frequency : `~astropy.units.Quantity` with spectral units
        The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength)
T_cmb : `~astropy.units.Quantity` with temperature units or None
The CMB temperature at z=0. If `None`, the default cosmology will be
used to get this temperature.
Notes
-----
    For broadband receivers this conversion does not hold, as it depends
    strongly on the frequency.
References
----------
.. [1] Planck 2013 results. IX. HFI spectral response
https://arxiv.org/abs/1303.5070
Examples
--------
Planck HFI 143 GHz::
>>> from astropy import units as u
>>> from astropy.cosmology import Planck15
>>> freq = 143 * u.GHz
>>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
>>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 0.37993172 MJy / sr>
"""
nu = frequency.to(si.GHz, spectral())
if T_cmb is None:
from astropy.cosmology import default_cosmology
T_cmb = default_cosmology.get().Tcmb0
def f(nu, T_cmb=T_cmb):
x = _si.h * nu / _si.k_B / T_cmb
return x**2 * np.exp(x) / np.expm1(x)**2
def convert_Jy_to_K(x_jybm):
factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(astrophys.Jy)
return x_jybm / factor
def convert_K_to_Jy(x_K):
factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(si.K)
return x_K / factor
return Equivalency([(astrophys.Jy/si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
"thermodynamic_temperature", {'frequency': frequency, "T_cmb": T_cmb})
def temperature():
"""Convert between Kelvin, Celsius, and Fahrenheit here because
Unit and CompositeUnit cannot do addition or subtraction properly.
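    Examples
    --------
    A minimal illustration::
        >>> import astropy.units as u
        >>> (0. * u.deg_C).to(u.K, equivalencies=u.temperature())  # doctest: +FLOAT_CMP
        <Quantity 273.15 K>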
"""
from .imperial import deg_F
return Equivalency([
(si.K, si.deg_C, lambda x: x - 273.15, lambda x: x + 273.15),
(si.deg_C, deg_F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),
(si.K, deg_F, lambda x: (x - 273.15) * 1.8 + 32.0,
lambda x: ((x - 32.0) / 1.8) + 273.15)], "temperature")
def temperature_energy():
"""Convert between Kelvin and keV(eV) to an equivalent amount."""
return Equivalency([
(si.K, si.eV, lambda x: x / (_si.e.value / _si.k_B.value),
lambda x: x * (_si.e.value / _si.k_B.value))], "temperature_energy")
def assert_is_spectral_unit(value):
try:
value.to(si.Hz, spectral())
    except (AttributeError, UnitsError) as ex:
        raise UnitsError("The 'rest' value must be a spectral equivalent "
                         "(frequency, wavelength, or energy).") from ex
def pixel_scale(pixscale):
"""
Convert between pixel distances (in units of ``pix``) and angular units,
given a particular ``pixscale``.
Parameters
----------
pixscale : `~astropy.units.Quantity`
The pixel scale either in units of angle/pixel or pixel/angle.
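    Examples
    --------
    A minimal illustration (hypothetical pixel scale)::
        >>> import astropy.units as u
        >>> equiv = u.pixel_scale(0.1 * u.arcsec / u.pix)
        >>> (100 * u.pix).to(u.arcsec, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 10. arcsec>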
"""
if pixscale.unit.is_equivalent(si.arcsec/astrophys.pix):
pixscale_val = pixscale.to_value(si.radian/astrophys.pix)
elif pixscale.unit.is_equivalent(astrophys.pix/si.arcsec):
pixscale_val = (1/pixscale).to_value(si.radian/astrophys.pix)
else:
raise UnitsError("The pixel scale must be in angle/pixel or "
"pixel/angle")
return Equivalency([(astrophys.pix, si.radian,
lambda px: px*pixscale_val, lambda rad: rad/pixscale_val)],
"pixel_scale", {'pixscale': pixscale})
def plate_scale(platescale):
"""
Convert between lengths (to be interpreted as lengths in the focal plane)
and angular units with a specified ``platescale``.
Parameters
----------
platescale : `~astropy.units.Quantity`
        The plate scale, either in units of angle/distance or distance/angle.
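    Examples
    --------
    A minimal illustration (hypothetical plate scale)::
        >>> import astropy.units as u
        >>> equiv = u.plate_scale(20 * u.arcsec / u.mm)
        >>> (1 * u.mm).to(u.arcsec, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 20. arcsec>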
"""
if platescale.unit.is_equivalent(si.arcsec/si.m):
platescale_val = platescale.to_value(si.radian/si.m)
elif platescale.unit.is_equivalent(si.m/si.arcsec):
platescale_val = (1/platescale).to_value(si.radian/si.m)
else:
raise UnitsError("The pixel scale must be in angle/distance or "
"distance/angle")
return Equivalency([(si.m, si.radian, lambda d: d*platescale_val, lambda rad: rad/platescale_val)],
"plate_scale", {'platescale': platescale})
def with_H0(H0=None):
"""
Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : `None` or `~astropy.units.Quantity`
The value of the Hubble constant to assume. If a `~astropy.units.Quantity`,
will assume the quantity *is* ``H0``. If `None` (default), use the
``H0`` attribute from the default `astropy.cosmology` cosmology.
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf
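    Examples
    --------
    A minimal illustration (hypothetical value of H0)::
        >>> import astropy.units as u
        >>> equiv = u.with_H0(70 * u.km / u.s / u.Mpc)
        >>> (100 * u.Mpc / u.littleh).to(u.Mpc, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 142.85714286 Mpc>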
"""
if H0 is None:
from astropy import cosmology
H0 = cosmology.default_cosmology.get().H0
h100_val_unit = Unit(100/(H0.to_value((si.km/si.s)/astrophys.Mpc)) * astrophys.littleh)
return Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0})
|
442857c830c9113ee0d8af5843c1915129852d0ff4079af590135f953e0338ba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""
from urllib.parse import urlparse
from collections import OrderedDict
import numpy as np
from .sky_coordinate import SkyCoord
from astropy.utils.data import download_file
from astropy.utils.decorators import classproperty
from astropy.utils.state import ScienceState
from astropy.utils import indent
from astropy import units as u
from astropy import _erfa as erfa
from astropy.constants import c as speed_of_light
from .representation import CartesianRepresentation
from .orbital_elements import calc_moon
from .builtin_frames import GCRS, ICRS
from .builtin_frames.utils import get_jd12
__all__ = ["get_body", "get_moon", "get_body_barycentric",
"get_body_barycentric_posvel", "solar_system_ephemeris"]
DEFAULT_JPL_EPHEMERIS = 'de430'
"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = OrderedDict(
(('sun', [(0, 10)]),
('mercury', [(0, 1), (1, 199)]),
('venus', [(0, 2), (2, 299)]),
('earth-moon-barycenter', [(0, 3)]),
('earth', [(0, 3), (3, 399)]),
('moon', [(0, 3), (3, 301)]),
('mars', [(0, 4)]),
('jupiter', [(0, 5)]),
('saturn', [(0, 6)]),
('uranus', [(0, 7)]),
('neptune', [(0, 8)]),
('pluto', [(0, 9)]))
)
"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = OrderedDict(
(('mercury', 1),
('venus', 2),
('earth-moon-barycenter', 3),
('mars', 4),
('jupiter', 5),
('saturn', 6),
('uranus', 7),
('neptune', 8)))
_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files. To change
the default to be the JPL ephemeris::
>>> from astropy.coordinates import solar_system_ephemeris
>>> solar_system_ephemeris.set('jpl') # doctest: +SKIP
Use of any JPL ephemeris requires the jplephem package
(https://pypi.python.org/pypi/jplephem).
If needed, the ephemeris file will be downloaded (and cached).
One can check which bodies are covered by a given ephemeris using::
>>> solar_system_ephemeris.bodies
('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[1:-1]
class solar_system_ephemeris(ScienceState):
"""Default ephemerides for calculating positions of Solar-System bodies.
    This can be one of the following:
- 'builtin': polynomial approximations to the orbital elements.
- 'de430' or 'de432s': short-cuts for recent JPL dynamical models.
- 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
- URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
    - `None`: Ensure an exception is raised if no explicit ephemeris is set.
The default is 'builtin', which uses the ``epv00`` and ``plan94``
routines from the ``erfa`` implementation of the Standards Of Fundamental
Astronomy library.
Notes
-----
Any file required will be downloaded (and cached) when the state is set.
The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is
~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is
~10MB, and covers years 1950-2050 [2]_. Older versions of the JPL
ephemerides (such as the widely used de200) can be used via their URL [3]_.
.. [1] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
.. [2] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
.. [3] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
"""
_value = 'builtin'
_kernel = None
@classmethod
def validate(cls, value):
# make no changes if value is None
if value is None:
return cls._value
# Set up Kernel; if the file is not in cache, this will download it.
cls.get_kernel(value)
return value
@classmethod
def get_kernel(cls, value):
# ScienceState only ensures the `_value` attribute is up to date,
# so we need to be sure any kernel returned is consistent.
if cls._kernel is None or cls._kernel.origin != value:
if cls._kernel is not None:
cls._kernel.daf.file.close()
cls._kernel = None
kernel = _get_kernel(value)
if kernel is not None:
kernel.origin = value
cls._kernel = kernel
return cls._kernel
@classproperty
def kernel(cls):
return cls.get_kernel(cls._value)
@classproperty
def bodies(cls):
if cls._value is None:
return None
if cls._value.lower() == 'builtin':
return (('earth', 'sun', 'moon') +
tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()))
else:
return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())
def _get_kernel(value):
"""
Try importing jplephem, download/retrieve from cache the Satellite Planet
Kernel corresponding to the given ephemeris.
"""
if value is None or value.lower() == 'builtin':
return None
if value.lower() == 'jpl':
value = DEFAULT_JPL_EPHEMERIS
if value.lower() in ('de430', 'de432s'):
value = ('http://naif.jpl.nasa.gov/pub/naif/generic_kernels'
'/spk/planets/{:s}.bsp'.format(value.lower()))
else:
try:
urlparse(value)
except Exception:
raise ValueError('{} was not one of the standard strings and '
'could not be parsed as a URL'.format(value))
try:
from jplephem.spk import SPK
except ImportError:
raise ImportError("Solar system JPL ephemeris calculations require "
"the jplephem package "
"(https://pypi.python.org/pypi/jplephem)")
return SPK.open(download_file(value, cache=True))
def _get_body_barycentric_posvel(body, time, ephemeris=None,
get_velocity=True):
"""Calculate the barycentric position (and velocity) of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
get_velocity : bool, optional
Whether or not to calculate the velocity as well as the position.
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation` or tuple
Barycentric (ICRS) position or tuple of position and velocity.
Notes
-----
No velocity can be calculated with the built-in ephemeris for the Moon.
Whether or not velocities are calculated makes little difference for the
built-in ephemerides, but for most JPL ephemeris files, the execution time
roughly doubles.
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
if ephemeris is None:
raise ValueError(_EPHEMERIS_NOTE)
kernel = solar_system_ephemeris.kernel
else:
kernel = _get_kernel(ephemeris)
jd1, jd2 = get_jd12(time, 'tdb')
if kernel is None:
body = body.lower()
earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
if body == 'earth':
body_pv_bary = earth_pv_bary
elif body == 'moon':
if get_velocity:
raise KeyError("the Moon's velocity cannot be calculated with "
"the '{0}' ephemeris.".format(ephemeris))
return calc_moon(time).cartesian
else:
sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
if body == 'sun':
body_pv_bary = sun_pv_bary
else:
try:
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
except KeyError:
raise KeyError("{0}'s position and velocity cannot be "
"calculated with the '{1}' ephemeris."
.format(body, ephemeris))
body_pv_helio = erfa.plan94(jd1, jd2, body_index)
body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)
body_pos_bary = CartesianRepresentation(
body_pv_bary['p'], unit=u.au, xyz_axis=-1, copy=False)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_pv_bary['v'], unit=u.au/u.day, xyz_axis=-1,
copy=False)
else:
if isinstance(body, str):
# Look up kernel chain for JPL ephemeris, based on name
try:
kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
except KeyError:
raise KeyError("{0}'s position cannot be calculated with "
"the {1} ephemeris.".format(body, ephemeris))
else:
            # otherwise, assume the user knows what they're doing and
            # intentionally passed in a kernel chain
kernel_spec = body
# jplephem cannot handle multi-D arrays, so convert to 1D here.
jd1_shape = getattr(jd1, 'shape', ())
if len(jd1_shape) > 1:
jd1, jd2 = jd1.ravel(), jd2.ravel()
# Note that we use the new jd1.shape here to create a 1D result array.
# It is reshaped below.
body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) +
getattr(jd1, 'shape', ()))
for pair in kernel_spec:
spk = kernel[pair]
if spk.data_type == 3:
# Type 3 kernels contain both position and velocity.
posvel = spk.compute(jd1, jd2)
if get_velocity:
body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
else:
                    body_posvel_bary[0] += posvel[:3]
else:
# spk.generate first yields the position and then the
# derivative. If no velocities are desired, body_posvel_bary
# has only one element and thus the loop ends after a single
# iteration, avoiding the velocity calculation.
for body_p_or_v, p_or_v in zip(body_posvel_bary,
spk.generate(jd1, jd2)):
body_p_or_v += p_or_v
body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
body_pos_bary = CartesianRepresentation(body_posvel_bary[0],
unit=u.km, copy=False)
if get_velocity:
body_vel_bary = CartesianRepresentation(body_posvel_bary[1],
unit=u.km/u.day, copy=False)
return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
    The velocity cannot be calculated for the Moon with the built-in
    ephemeris. To just get the position, use
    :func:`~astropy.coordinates.get_body_barycentric`.
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
get_body_barycentric_posvel.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_body_barycentric(body, time, ephemeris=None):
"""Calculate the barycentric position of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) position of the body in cartesian coordinates
See also
--------
get_body_barycentric_posvel : to calculate both position and velocity.
Notes
-----
"""
return _get_body_barycentric_posvel(body, time, ephemeris,
get_velocity=False)
get_body_barycentric.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def _get_apparent_body_position(body, time, ephemeris):
"""Calculate the apparent position of body ``body`` relative to Earth.
This corrects for the light-travel time to the object.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``~astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
cartesian_position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) apparent position of the body in cartesian coordinates
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
    # The Moon with the built-in ephemeris is a special case: there is no
    # need to account for light travel time, since that is already included
    # in the Meeus algorithm used.
if ephemeris == 'builtin' and body.lower() == 'moon':
return get_body_barycentric(body, time, ephemeris)
# Calculate position given approximate light travel time.
delta_light_travel_time = 20. * u.s
emitted_time = time
light_travel_time = 0. * u.s
earth_loc = get_body_barycentric('earth', time, ephemeris)
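    # Iterate: each pass re-evaluates the body position at the emission time
    # implied by the previous light-travel-time estimate, until successive
    # estimates agree to within 10 nanoseconds.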
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s):
body_loc = get_body_barycentric(body, emitted_time, ephemeris)
earth_distance = (body_loc - earth_loc).norm()
delta_light_travel_time = (light_travel_time -
earth_distance/speed_of_light)
light_travel_time = earth_distance/speed_of_light
emitted_time = time - light_travel_time
return get_body_barycentric(body, emitted_time, ephemeris)
_get_apparent_body_position.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
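    Examples
    --------
    A sketch of typical use; ``EarthLocation.of_site`` needs remote data,
    so these lines are skipped in tests::
        >>> from astropy.time import Time
        >>> from astropy.coordinates import get_body, EarthLocation
        >>> t = Time('2014-09-22 23:22')
        >>> loc = EarthLocation.of_site('greenwich')  # doctest: +SKIP
        >>> mars = get_body('mars', t, loc)  # doctest: +SKIP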
Notes
-----
"""
if location is None:
location = time.location
cartrep = _get_apparent_body_position(body, time, ephemeris)
icrs = ICRS(cartrep)
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
gcrs = icrs.transform_to(GCRS(obstime=time,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
else:
gcrs = icrs.transform_to(GCRS(obstime=time))
return SkyCoord(gcrs)
get_body.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_moon(time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
location : `~astropy.coordinates.EarthLocation`
Location of observer on the Earth. If none is supplied, taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the Moon
Notes
-----
"""
return get_body('moon', time, location=location, ephemeris=ephemeris)
get_moon.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def _apparent_position_in_true_coordinates(skycoord):
"""
    Convert a SkyCoord in the GCRS frame into one in which RA and Dec
    are defined w.r.t. the true equinox and poles of the Earth.
"""
jd1, jd2 = get_jd12(skycoord.obstime, 'tt')
_, _, _, _, _, _, _, rbpn = erfa.pn00a(jd1, jd2)
return SkyCoord(skycoord.frame.realize_frame(
skycoord.cartesian.transform(rbpn)))
|
7ca6bcf4ff30df6dff4dc2051831762f61918fad67daf253401d2153395a7459 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
''' This module defines custom errors and exceptions used in astropy.coordinates.
'''
from astropy.utils.exceptions import AstropyWarning
__all__ = ['RangeError', 'BoundsError', 'IllegalHourError',
'IllegalMinuteError', 'IllegalSecondError', 'ConvertError',
'IllegalHourWarning', 'IllegalMinuteWarning', 'IllegalSecondWarning',
'UnknownSiteException']
class RangeError(ValueError):
"""
Raised when some part of an angle is out of its valid range.
"""
class BoundsError(RangeError):
"""
Raised when an angle is outside of its user-specified bounds.
"""
class IllegalHourError(RangeError):
"""
Raised when an hour value is not in the range [0,24).
Parameters
----------
hour : int, float
Examples
--------
.. code-block:: python
if not 0 <= hr < 24:
raise IllegalHourError(hour)
"""
def __init__(self, hour):
self.hour = hour
def __str__(self):
return "An invalid value for 'hours' was found ('{0}'); must be in the range [0,24).".format(self.hour)
class IllegalHourWarning(AstropyWarning):
"""
Raised when an hour value is 24.
Parameters
----------
hour : int, float
"""
def __init__(self, hour, alternativeactionstr=None):
self.hour = hour
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = "'hour' was found to be '{0}', which is not in range (-24, 24).".format(self.hour)
if self.alternativeactionstr is not None:
message += ' ' + self.alternativeactionstr
return message
class IllegalMinuteError(RangeError):
"""
    Raised when a minute value is not in the range [0,60).
Parameters
----------
minute : int, float
Examples
--------
.. code-block:: python
if not 0 <= min < 60:
raise IllegalMinuteError(minute)
"""
def __init__(self, minute):
self.minute = minute
def __str__(self):
return "An invalid value for 'minute' was found ('{0}'); should be in the range [0,60).".format(self.minute)
class IllegalMinuteWarning(AstropyWarning):
"""
Raised when a minute value is 60.
Parameters
----------
minute : int, float
"""
def __init__(self, minute, alternativeactionstr=None):
self.minute = minute
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = "'minute' was found to be '{0}', which is not in range [0,60).".format(self.minute)
if self.alternativeactionstr is not None:
message += ' ' + self.alternativeactionstr
return message
class IllegalSecondError(RangeError):
"""
    Raised when a second value (time) is not in the range [0,60).
Parameters
----------
second : int, float
Examples
--------
.. code-block:: python
if not 0 <= sec < 60:
raise IllegalSecondError(second)
"""
def __init__(self, second):
self.second = second
def __str__(self):
return "An invalid value for 'second' was found ('{0}'); should be in the range [0,60).".format(self.second)
class IllegalSecondWarning(AstropyWarning):
"""
Raised when a second value is 60.
Parameters
----------
second : int, float
"""
def __init__(self, second, alternativeactionstr=None):
self.second = second
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = "'second' was found to be '{0}', which is not in range [0,60).".format(self.second)
if self.alternativeactionstr is not None:
message += ' ' + self.alternativeactionstr
return message
# TODO: consider if this should be moved to `units`?
class UnitsError(ValueError):
"""
Raised if units are missing or invalid.
"""
class ConvertError(Exception):
"""
    Raised if a coordinate system cannot be converted to another.
"""
class UnknownSiteException(KeyError):
def __init__(self, site, attribute, close_names=None):
message = "Site '{0}' not in database. Use {1} to see available sites.".format(site, attribute)
if close_names:
message += " Did you mean one of: '{0}'?'".format("', '".join(close_names))
self.site = site
self.attribute = attribute
self.close_names = close_names
return super().__init__(message)
|
51c21149a3c26adb758bc7d26d017c107cf78a8a9608422ec860698c9e357c8e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains utilities used for constructing rotation matrices.
"""
from functools import reduce
import numpy as np
from astropy import units as u
from .angles import Angle
def matrix_product(*matrices):
"""Matrix multiply all arguments together.
Arguments should have dimension 2 or larger. Larger dimensional objects
are interpreted as stacks of matrices residing in the last two dimensions.
This function mostly exists for readability: using `~numpy.matmul`
directly, one would have ``matmul(matmul(m1, m2), m3)``, etc. For even
better readability, one might consider using `~numpy.matrix` for the
arguments (so that one could write ``m1 * m2 * m3``), but then it is not
possible to handle stacks of matrices. Once only python >=3.5 is supported,
this function can be replaced by ``m1 @ m2 @ m3``.
"""
return reduce(np.matmul, matrices)
def matrix_transpose(matrix):
"""Transpose a matrix or stack of matrices by swapping the last two axes.
This function mostly exists for readability; seeing ``.swapaxes(-2, -1)``
it is not that obvious that one does a transpose. Note that one cannot
use `~numpy.ndarray.T`, as this transposes all axes and thus does not
work for stacks of matrices.
"""
return matrix.swapaxes(-2, -1)
def rotation_matrix(angle, axis='z', unit=None):
"""
Generate matrices for rotation by some angle around some axis.
Parameters
----------
angle : convertible to `Angle`
The amount of rotation the matrices should represent. Can be an array.
axis : str, or array-like
Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to
rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is
counterclockwise looking down the + axis (e.g. positive rotations obey
left-hand-rule). If given as an array, the last dimension should be 3;
it will be broadcast against ``angle``.
unit : UnitBase, optional
If ``angle`` does not have associated units, they are in this
unit. If neither are provided, it is assumed to be degrees.
Returns
-------
    rmat : `~numpy.ndarray`
A unitary rotation matrix.
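    Examples
    --------
    A minimal illustration (exact float formatting may vary with the numpy
    version)::
        >>> rotation_matrix(90, 'z')  # doctest: +FLOAT_CMP
        array([[ 6.12323400e-17,  1.00000000e+00,  0.00000000e+00],
               [-1.00000000e+00,  6.12323400e-17,  0.00000000e+00],
               [ 0.00000000e+00,  0.00000000e+00,  1.00000000e+00]])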
"""
if unit is None:
unit = u.degree
angle = Angle(angle, unit=unit)
s = np.sin(angle)
c = np.cos(angle)
# use optimized implementations for x/y/z
try:
i = 'xyz'.index(axis)
except TypeError:
axis = np.asarray(axis)
axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True))
R = (axis[..., np.newaxis] * axis[..., np.newaxis, :] *
(1. - c)[..., np.newaxis, np.newaxis])
for i in range(0, 3):
R[..., i, i] += c
a1 = (i + 1) % 3
a2 = (i + 2) % 3
R[..., a1, a2] += axis[..., i] * s
R[..., a2, a1] -= axis[..., i] * s
else:
a1 = (i + 1) % 3
a2 = (i + 2) % 3
R = np.zeros(angle.shape + (3, 3))
R[..., i, i] = 1.
R[..., a1, a1] = c
R[..., a1, a2] = s
R[..., a2, a1] = -s
R[..., a2, a2] = c
return R
def angle_axis(matrix):
"""
Angle of rotation and rotation axis for a given rotation matrix.
Parameters
----------
matrix : array-like
A 3 x 3 unitary rotation matrix (or stack of matrices).
Returns
-------
angle : `Angle`
The angle of rotation.
axis : array
The (normalized) axis of rotation (with last dimension 3).
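    Examples
    --------
    A minimal round-trip illustration (float formatting may vary)::
        >>> angle, axis = angle_axis(rotation_matrix(60, 'x'))
        >>> angle  # doctest: +FLOAT_CMP
        <Angle 1.04719755 rad>
        >>> axis  # doctest: +FLOAT_CMP
        array([1., 0., 0.])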
"""
m = np.asanyarray(matrix)
if m.shape[-2:] != (3, 3):
raise ValueError('matrix is not 3x3')
axis = np.zeros(m.shape[:-1])
axis[..., 0] = m[..., 2, 1] - m[..., 1, 2]
axis[..., 1] = m[..., 0, 2] - m[..., 2, 0]
axis[..., 2] = m[..., 1, 0] - m[..., 0, 1]
r = np.sqrt((axis * axis).sum(-1, keepdims=True))
angle = np.arctan2(r[..., 0],
m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.)
return Angle(angle, u.radian), -axis / r
|
5ff3ca2b94deb41fef9294e0e9c0cfe67d90531a313d42d25c87b6d17b8f9e92 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for celestial coordinates
of astronomical objects. It also contains a framework for conversions
between coordinate systems.
"""
from .errors import *
from .angles import *
from .baseframe import *
from .attributes import *
from .distances import *
from .earth import *
from .transformations import *
from .builtin_frames import *
from .name_resolve import *
from .matching import *
from .representation import *
from .sky_coordinate import *
from .funcs import *
from .calculation import *
from .solar_system import *
# This is for backwards-compatibility -- can be removed in v3.0 when the
# deprecation warnings are removed
from .attributes import (TimeFrameAttribute, QuantityFrameAttribute,
CartesianRepresentationFrameAttribute)
__doc__ += builtin_frames._transform_graph_docs
|
a70c2ff0e5387b4514bf3de06991af9572ec3d588fd1569f4b92d403bf10711c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for matching coordinate catalogs.
"""
import numpy as np
from .representation import UnitSphericalRepresentation
from astropy import units as u
from . import Angle
__all__ = ['match_coordinates_3d', 'match_coordinates_sky', 'search_around_3d',
'search_around_sky']
def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'):
"""
Finds the nearest 3-dimensional matches of a coordinate or coordinates in
a set of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in either ``matchcoord``
or ``catalogcoord``.
Parameters
----------
matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate(s) to match to the catalog.
catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The base catalog in which to search for matches. Typically this will
be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is desired here,
as that is correct for matching one set of coordinates to another.
The next likely use case is ``2``, for matching a coordinate catalog
against *itself* (``1`` is inappropriate because each point will find
itself as the closest match).
storekdtree : bool or str, optional
If a string, will store the KD-Tree used for the computation
in the ``catalogcoord``, as in ``catalogcoord.cache`` with the
provided name. This dramatically speeds up subsequent calls with the
same catalog. If False, the KD-Tree is discarded after use.
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for each
``matchcoord``. Shape matches ``matchcoord``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each ``matchcoord``
and the ``matchcoord``. Shape matches ``matchcoord``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each ``matchcoord`` and
the ``matchcoord``. Shape matches ``matchcoord``.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ to be installed
or it will fail.
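    Examples
    --------
    A minimal sketch (hypothetical coordinates)::
        >>> from astropy.coordinates import SkyCoord, match_coordinates_3d
        >>> cat = SkyCoord([10., 20., 30.], [-10., 0., 10.], unit='deg')
        >>> target = SkyCoord(19.9, 0.1, unit='deg')
        >>> idx, sep2d, dist3d = match_coordinates_3d(target, cat)
        >>> int(idx)
        1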
"""
if catalogcoord.isscalar or len(catalogcoord) < 1:
raise ValueError('The catalog for coordinate matching cannot be a '
'scalar or length-0.')
kdt = _get_cartesian_kdtree(catalogcoord, storekdtree)
# make sure coordinate systems match
matchcoord = matchcoord.transform_to(catalogcoord)
# make sure units match
catunit = catalogcoord.cartesian.x.unit
matchxyz = matchcoord.cartesian.xyz.to(catunit)
matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3))
dist, idx = kdt.query(matchflatxyz.T, nthneighbor)
if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise
dist = dist[:, -1]
idx = idx[:, -1]
sep2d = catalogcoord[idx].separation(matchcoord)
return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit
def match_coordinates_sky(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_sky'):
"""
Finds the nearest on-sky matches of a coordinate or coordinates in
a set of catalog coordinates.
This finds the on-sky closest neighbor, which is only different from the
3-dimensional match if ``distance`` is set in either ``matchcoord``
or ``catalogcoord``.
Parameters
----------
matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate(s) to match to the catalog.
catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The base catalog in which to search for matches. Typically this will
be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is desired here,
as that is correct for matching one set of coordinates to another.
The next likely use case is ``2``, for matching a coordinate catalog
against *itself* (``1`` is inappropriate because each point will find
itself as the closest match).
storekdtree : bool or str, optional
If a string, will store the KD-Tree used for the computation
in the ``catalogcoord`` in ``catalogcoord.cache`` with the
provided name. This dramatically speeds up subsequent calls with the
same catalog. If False, the KD-Tree is discarded after use.
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for each
``matchcoord``. Shape matches ``matchcoord``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each ``matchcoord`` and
the ``matchcoord``. Shape matches ``matchcoord``. If either
``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D
distance on the unit sphere, rather than a true distance.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ to be installed
or it will fail.
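    Examples
    --------
    A minimal sketch (hypothetical coordinates)::
        >>> from astropy.coordinates import SkyCoord, match_coordinates_sky
        >>> cat = SkyCoord([10., 20., 30.], [-10., 0., 10.], unit='deg')
        >>> target = SkyCoord(19.9, 0.1, unit='deg')
        >>> idx, sep2d, dist3d = match_coordinates_sky(target, cat)
        >>> int(idx)
        1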
"""
if catalogcoord.isscalar or len(catalogcoord) < 1:
raise ValueError('The catalog for coordinate matching cannot be a '
'scalar or length-0.')
# send to catalog frame
newmatch = matchcoord.transform_to(catalogcoord)
# strip out distance info
match_urepr = newmatch.data.represent_as(UnitSphericalRepresentation)
newmatch_u = newmatch.realize_frame(match_urepr)
cat_urepr = catalogcoord.data.represent_as(UnitSphericalRepresentation)
newcat_u = catalogcoord.realize_frame(cat_urepr)
# Check for a stored KD-tree on the passed-in coordinate. Normally it will
# have a distinct name from the "3D" one, so it's safe to use even though
# it's based on UnitSphericalRepresentation.
storekdtree = catalogcoord.cache.get(storekdtree, storekdtree)
idx, sep2d, sep3d = match_coordinates_3d(newmatch_u, newcat_u, nthneighbor, storekdtree)
# sep3d is *wrong* above, because the distance information was removed,
# unless one of the catalogs doesn't have a real distance
if not (isinstance(catalogcoord.data, UnitSphericalRepresentation) or
isinstance(newmatch.data, UnitSphericalRepresentation)):
sep3d = catalogcoord[idx].separation_3d(newmatch)
# update the kdtree on the actual passed-in coordinate
if isinstance(storekdtree, str):
catalogcoord.cache[storekdtree] = newcat_u.cache[storekdtree]
elif storekdtree is True:
# the old backwards-compatible name
catalogcoord.cache['kdtree'] = newcat_u.cache['kdtree']
return idx, sep2d, sep3d
def search_around_3d(coords1, coords2, distlimit, storekdtree='kdtree_3d'):
"""
Searches for pairs of points that are at least as close as a specified
distance in 3D space.
This is intended for use on coordinate objects with arrays of coordinates,
not scalars. For scalar coordinates, it is better to use the
``separation_3d`` methods.
Parameters
----------
coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The first set of coordinates, which will be searched for matches from
``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The second set of coordinates, which will be searched for matches from
``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
distlimit : `~astropy.units.Quantity` with distance units
The physical radius to search within.
storekdtree : bool or str, optional
If a string, will store the KD-Tree used in the search with the name
``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls
to this function. If False, the KD-Trees are not saved.
Returns
-------
idx1 : integer array
Indices into ``coords1`` that matches to the corresponding element of
``idx2``. Shape matches ``idx2``.
idx2 : integer array
Indices into ``coords2`` that matches to the corresponding element of
``idx1``. Shape matches ``idx1``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches ``idx1``
and ``idx2``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches ``idx1`` and
``idx2``. The unit is that of ``coords1``.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0)
to be installed or it will fail.
If you are using this function to search in a catalog for matches around
specific points, the convention is for ``coords2`` to be the catalog, and
``coords1`` are the points to search around. While these operations are
mathematically the same if ``coords1`` and ``coords2`` are flipped, some of
the optimizations may work better if this convention is obeyed.
In the current implementation, the return values are always sorted in the
same order as the ``coords1`` (so ``idx1`` is in ascending order). This is
considered an implementation detail, though, so it could change in a future
release.
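    Examples
    --------
    A minimal sketch (hypothetical coordinates with distances)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import SkyCoord, search_around_3d
        >>> c1 = SkyCoord([10., 20.], [0., 0.], unit='deg', distance=[10., 10.] * u.pc)
        >>> c2 = SkyCoord([10.1, 35.], [0., 0.], unit='deg', distance=[10., 10.] * u.pc)
        >>> idx1, idx2, sep2d, dist3d = search_around_3d(c1, c2, 1 * u.pc)
        >>> idx1, idx2
        (array([0]), array([0]))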
"""
if not distlimit.isscalar:
raise ValueError('distlimit must be a scalar in search_around_3d')
if coords1.isscalar or coords2.isscalar:
raise ValueError('One of the inputs to search_around_3d is a scalar. '
'search_around_3d is intended for use with array '
'coordinates, not scalars. Instead, use '
'``coord1.separation_3d(coord2) < distlimit`` to find '
'the coordinates near a scalar coordinate.')
if len(coords1) == 0 or len(coords2) == 0:
# Empty array input: return empty match
return (np.array([], dtype=int), np.array([], dtype=int),
Angle([], u.deg),
u.Quantity([], coords1.distance.unit))
kdt2 = _get_cartesian_kdtree(coords2, storekdtree)
cunit = coords2.cartesian.x.unit
# we convert coord1 to match coord2's frame. We do it this way
# so that if the conversion does happen, the KD tree of coord2 at least gets
# saved. (by convention, coord2 is the "catalog" if that makes sense)
coords1 = coords1.transform_to(coords2)
kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit)
# this is the *cartesian* 3D distance that corresponds to the given angle
d = distlimit.to_value(cunit)
idxs1 = []
idxs2 = []
for i, matches in enumerate(kdt1.query_ball_tree(kdt2, d)):
for match in matches:
idxs1.append(i)
idxs2.append(match)
idxs1 = np.array(idxs1, dtype=int)
idxs2 = np.array(idxs2, dtype=int)
if idxs1.size == 0:
d2ds = Angle([], u.deg)
d3ds = u.Quantity([], coords1.distance.unit)
else:
d2ds = coords1[idxs1].separation(coords2[idxs2])
d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
return idxs1, idxs2, d2ds, d3ds
def search_around_sky(coords1, coords2, seplimit, storekdtree='kdtree_sky'):
"""
Searches for pairs of points that have an angular separation at least as
close as a specified angle.
This is intended for use on coordinate objects with arrays of coordinates,
not scalars. For scalar coordinates, it is better to use the ``separation``
methods.
Parameters
----------
coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The first set of coordinates, which will be searched for matches from
``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The second set of coordinates, which will be searched for matches from
``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
seplimit : `~astropy.units.Quantity` with angle units
The on-sky separation to search within.
storekdtree : bool or str, optional
If a string, will store the KD-Tree used in the search with the name
``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls
to this function. If False, the KD-Trees are not saved.
Returns
-------
idx1 : integer array
Indices into ``coords1`` that matches to the corresponding element of
``idx2``. Shape matches ``idx2``.
idx2 : integer array
Indices into ``coords2`` that matches to the corresponding element of
``idx1``. Shape matches ``idx1``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches ``idx1``
and ``idx2``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches ``idx1``
and ``idx2``; the unit is that of ``coords1``.
If either ``coords1`` or ``coords2`` don't have a distance,
this is the 3D distance on the unit sphere, rather than a
physical distance.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0)
to be installed or it will fail.
In the current implementation, the return values are always sorted in the
same order as the ``coords1`` (so ``idx1`` is in ascending order). This is
considered an implementation detail, though, so it could change in a future
release.
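    Examples
    --------
    A minimal usage sketch (the coordinate values are illustrative, and the
    final call requires SciPy)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import SkyCoord, search_around_sky
        >>> c1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
        >>> c2 = SkyCoord([1.1, 5]*u.deg, [3, 6]*u.deg)
        >>> idx1, idx2, sep2d, dist3d = search_around_sky(c1, c2, 0.5*u.deg)  # doctest: +SKIP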
"""
if not seplimit.isscalar:
raise ValueError('seplimit must be a scalar in search_around_sky')
if coords1.isscalar or coords2.isscalar:
raise ValueError('One of the inputs to search_around_sky is a scalar. '
'search_around_sky is intended for use with array '
'coordinates, not scalars. Instead, use '
'``coord1.separation(coord2) < seplimit`` to find the '
'coordinates near a scalar coordinate.')
if len(coords1) == 0 or len(coords2) == 0:
# Empty array input: return empty match
if coords2.distance.unit == u.dimensionless_unscaled:
distunit = u.dimensionless_unscaled
else:
distunit = coords1.distance.unit
return (np.array([], dtype=int), np.array([], dtype=int),
Angle([], u.deg),
u.Quantity([], distunit))
# we convert coord1 to match coord2's frame. We do it this way
# so that if the conversion does happen, the KD tree of coord2 at least gets
# saved. (by convention, coord2 is the "catalog" if that makes sense)
coords1 = coords1.transform_to(coords2)
# strip out distance info
urepr1 = coords1.data.represent_as(UnitSphericalRepresentation)
ucoords1 = coords1.realize_frame(urepr1)
kdt1 = _get_cartesian_kdtree(ucoords1, storekdtree)
if storekdtree and coords2.cache.get(storekdtree):
# just use the stored KD-Tree
kdt2 = coords2.cache[storekdtree]
else:
# strip out distance info
urepr2 = coords2.data.represent_as(UnitSphericalRepresentation)
ucoords2 = coords2.realize_frame(urepr2)
kdt2 = _get_cartesian_kdtree(ucoords2, storekdtree)
if storekdtree:
# save the KD-Tree in coords2, *not* ucoords2
coords2.cache['kdtree' if storekdtree is True else storekdtree] = kdt2
# this is the *cartesian* 3D distance that corresponds to the given angle
r = (2 * np.sin(Angle(seplimit) / 2.0)).value
idxs1 = []
idxs2 = []
for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)):
for match in matches:
idxs1.append(i)
idxs2.append(match)
idxs1 = np.array(idxs1, dtype=int)
idxs2 = np.array(idxs2, dtype=int)
if idxs1.size == 0:
if coords2.distance.unit == u.dimensionless_unscaled:
distunit = u.dimensionless_unscaled
else:
distunit = coords1.distance.unit
d2ds = Angle([], u.deg)
d3ds = u.Quantity([], distunit)
else:
d2ds = coords1[idxs1].separation(coords2[idxs2])
try:
d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
except ValueError:
# they don't have distances, so we just fall back on the cartesian
# distance, computed from d2ds
d3ds = 2 * np.sin(d2ds / 2.0)
return idxs1, idxs2, d2ds, d3ds
def _get_cartesian_kdtree(coord, attrname_or_kdt='kdtree', forceunit=None):
"""
This is a utility function to retrieve (and build/cache, if necessary)
a 3D cartesian KD-Tree from various sorts of astropy coordinate objects.
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinates to build the KD-Tree for.
attrname_or_kdt : bool or str or KDTree
If a string, will store the KD-Tree used for the computation in the
``coord``, in ``coord.cache`` with the provided name. If given as a
KD-Tree, it will just be used directly.
forceunit : unit or None
If a unit, the cartesian coordinates will convert to that unit before
        being put in the KD-Tree. If None, whatever unit it's already in
        will be used.
Returns
-------
kdt : `~scipy.spatial.cKDTree` or `~scipy.spatial.KDTree`
The KD-Tree representing the 3D cartesian representation of the input
coordinates.
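    Examples
    --------
    A sketch of the caching behavior of this internal utility (values are
    illustrative; the tree construction requires SciPy)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import SkyCoord
        >>> from astropy.coordinates.matching import _get_cartesian_kdtree
        >>> cat = SkyCoord([10, 20]*u.deg, [0, 5]*u.deg)
        >>> kdt = _get_cartesian_kdtree(cat)   # builds the tree and caches it  # doctest: +SKIP
        >>> kdt is _get_cartesian_kdtree(cat)  # a second call reuses the cache  # doctest: +SKIP
        True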
"""
from warnings import warn
# without scipy this will immediately fail
from scipy import spatial
try:
KDTree = spatial.cKDTree
except Exception:
warn('C-based KD tree not found, falling back on (much slower) '
'python implementation')
KDTree = spatial.KDTree
if attrname_or_kdt is True: # backwards compatibility for pre v0.4
attrname_or_kdt = 'kdtree'
# figure out where any cached KDTree might be
if isinstance(attrname_or_kdt, str):
kdt = coord.cache.get(attrname_or_kdt, None)
if kdt is not None and not isinstance(kdt, KDTree):
raise TypeError('The `attrname_or_kdt` "{0}" is not a scipy KD tree!'.format(attrname_or_kdt))
elif isinstance(attrname_or_kdt, KDTree):
kdt = attrname_or_kdt
attrname_or_kdt = None
elif not attrname_or_kdt:
kdt = None
else:
raise TypeError('Invalid `attrname_or_kdt` argument for KD-Tree:' +
str(attrname_or_kdt))
if kdt is None:
# need to build the cartesian KD-tree for the catalog
if forceunit is None:
cartxyz = coord.cartesian.xyz
else:
cartxyz = coord.cartesian.xyz.to(forceunit)
flatxyz = cartxyz.reshape((3, np.prod(cartxyz.shape) // 3))
try:
# Set compact_nodes=False, balanced_tree=False to use
# "sliding midpoint" rule, which is much faster than standard for
# many common use cases
kdt = KDTree(flatxyz.value.T, compact_nodes=False, balanced_tree=False)
except TypeError:
# Python implementation does not take compact_nodes and balanced_tree
# as arguments. However, it uses sliding midpoint rule by default
kdt = KDTree(flatxyz.value.T)
if attrname_or_kdt:
# cache the kdtree in `coord`
coord.cache[attrname_or_kdt] = kdt
return kdt
7664080e69fe2cd106f20d16b723f558fa0603cdf2a794825f19e89b19b02f50 |
import re
import copy
import numpy as np
from astropy import _erfa as erfa
from astropy.utils.compat.misc import override__dir__
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.wcs.utils import skycoord_to_pixel, pixel_to_skycoord
from astropy.utils.data_info import MixinInfo
from astropy.utils import ShapedLikeNDArray
from astropy.time import Time
from .distances import Distance
from .angles import Angle
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
GenericFrame, _get_repr_cls)
from .builtin_frames import ICRS, SkyOffsetFrame
from .representation import (SphericalRepresentation,
UnitSphericalRepresentation, SphericalDifferential)
from .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data,
_parse_coordinate_data)
__all__ = ['SkyCoord', 'SkyCoordInfo']
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ['{0.' + compname + '.value:}' for compname
in repr_data.components]
return ','.join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ','.join(str(getattr(repr_data, comp).unit) or 'None'
for comp in repr_data.components)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if (issubclass(sc.representation_type, SphericalRepresentation) and
isinstance(sc.data, UnitSphericalRepresentation)):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type,
in_frame_units=True)
return repr_data
def _represent_as_dict(self):
obj = self._parent
attrs = (list(obj.representation_component_names) +
list(frame_transform_graph.frame_attributes.keys()))
# Don't output distance if it is all unitless 1.0
if 'distance' in attrs and np.all(obj.distance == 1.0):
attrs.remove('distance')
self._represent_as_dict_attrs = attrs
out = super()._represent_as_dict()
out['representation_type'] = obj.representation_type.get_name()
out['frame'] = obj.frame.name
        # Note that obj.info.unit is a fake composite unit (e.g. 'deg,deg,None'
        # or 'None,None,m') and is not stored. The individual attributes have
# units.
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: http://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults to
        ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied ``LON`` and ``LAT`` values, respectively. If
only one unit is supplied then it applies to both ``LON`` and
``LAT``.
obstime : valid `~astropy.time.Time` initializer, optional
Time of observation
equinox : valid `~astropy.time.Time` initializer, optional
Coordinate frame equinox
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : valid `~astropy.coordinates.Angle` initializer, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity`, optional
Proper motion components, in angle per time units.
l, b : valid `~astropy.coordinates.Angle` initializer, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity`, optional
Proper motion components in the `Galactic` frame, in angle per time
units.
        x, y, z : float or `~astropy.units.Quantity`, optional
            Cartesian coordinate values
        u, v, w : float or `~astropy.units.Quantity`, optional
            Cartesian coordinate values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity`, optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError('Cannot initialize from a coordinate frame '
'instance without coordinate data')
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError('Cannot create a SkyCoord without data')
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method,
*args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, 'size', 1) > 1:
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, '_' + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
new.info = self.info
return new
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
        (e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
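        Examples
        --------
        A minimal sketch (the coordinate values are illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c = SkyCoord(ra=10*u.deg, dec=20*u.deg, frame='icrs')
            >>> c.transform_to('galactic')  # doctest: +SKIP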
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if (frame_val is not None and not
(merge_attributes and frame.is_frame_attr_default(attr))):
frame_kwargs[attr] = frame_val
elif (self_val is not None and
not self.is_frame_attr_default(attr)):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError('Transform `frame` must be a frame name, class, or instance')
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError('Cannot transform from {0} to {1}'
.format(self.frame.__class__, new_frame_cls))
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in (set(new_coord.get_frame_attr_names()) &
set(frame_kwargs.keys())):
frame_kwargs.pop(attr)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
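        Examples
        --------
        A minimal sketch of evolving a coordinate with a known proper motion
        (all values are illustrative)::
            >>> import astropy.units as u
            >>> from astropy.time import Time
            >>> from astropy.coordinates import SkyCoord
            >>> c = SkyCoord(ra=10*u.deg, dec=20*u.deg,
            ...              pm_ra_cosdec=50*u.mas/u.yr, pm_dec=-30*u.mas/u.yr,
            ...              obstime=Time('J2000'))
            >>> c.apply_space_motion(new_obstime=Time('J2020'))  # doctest: +SKIP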
"""
if (new_obstime is None and dt is None or
new_obstime is not None and dt is not None):
raise ValueError("You must specify one of `new_obstime` or `dt`, "
"but not both.")
# Validate that we have velocity info
if 's' not in self.frame.data.differentials:
raise ValueError('SkyCoord requires velocity data to evolve the '
'position.')
if 'obstime' in self.frame.frame_attributes:
raise NotImplementedError("Updating the coordinates in a frame "
"with explicit time dependence is "
"currently not supported. If you would "
"like this functionality, please open an "
"issue on github:\n"
"https://github.com/astropy/astropy")
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
            # position / velocity were measured initially
raise ValueError('This object has no associated `obstime`. '
'apply_space_motion() must receive a time '
'difference, `dt`, and not a new obstime.')
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time('J2000')
                new_obstime = None  # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials['s']
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by starpm convention
plx = 0.
try:
rv = icrsvel.d_distance.to_value(u.km/u.s)
except u.UnitConversionError: # No RV
rv = 0.
starpm = erfa.starpm(icrsrep.lon.radian, icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian/u.yr),
icrsvel.d_lat.to_value(u.radian/u.yr),
plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2)
icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False),
distance=Distance(parallax=starpm[4] * u.arcsec, copy=False),
radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False),
differential_type=SphericalDifferential)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {attrnm: getattr(self, attrnm)
for attrnm in self._extra_frameattr_names}
frattrs['obstime'] = new_obstime
return self.__class__(icrs2, **frattrs).transform_to(self.frame)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the master transform graph.
"""
if '_sky_coord_frame' in self.__dict__:
if self.frame.name == attr:
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.get_frame_attr_names():
return getattr(self.frame, attr)
else:
return getattr(self, '_' + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError("'{0}' object has no attribute '{1}'"
.format(self.__class__.__name__, attr))
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if '_sky_coord_frame' in self.__dict__:
if self.frame.name == attr:
raise AttributeError("'{0}' is immutable".format(attr))
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError("'{0}' is immutable".format(attr))
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__('_' + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if '_sky_coord_frame' in self.__dict__:
if self.frame.name == attr:
raise AttributeError("'{0}' is immutable".format(attr))
if not attr.startswith('_') and hasattr(self._sky_coord_frame,
attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError("'{0}' is immutable".format(attr))
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__('_' + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
            # Otherwise, do the standard Python attribute deletion
super().__delattr__(attr)
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
# determine the aliases that this can be transformed to.
dir_values = set()
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(set(attr for attr in dir(self.frame) if not attr.startswith('_')))
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return dir_values
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ': ' + frameattrs
data = self.frame._data_repr()
if data:
data = ': ' + data
return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())
def to_string(self, style='decimal', **kwargs):
"""
A string representation of the coordinates.
        The default style definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
            common ways to represent coordinates. The default is ``'decimal'``.
kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
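        Examples
        --------
        A minimal sketch (the coordinate is illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c = SkyCoord(10.5*u.deg, 20.25*u.deg)
            >>> c.to_string('hmsdms')  # doctest: +SKIP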
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}},
'dms': {'lonargs': {'unit': u.degree},
'latargs': {'unit': u.degree}},
'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
'latargs': {'unit': u.degree, 'decimal': True}}
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]['lonargs'])
latargs.update(styles[style]['latargs'])
else:
raise ValueError('Invalid style. Valid options are: {0}'.format(",".join(styles)))
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (sph_coord.lon.to_string(**lonargs)
+ " " +
sph_coord.lat.to_string(**latargs))
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [(lonangle.to_string(**lonargs)
+ " " +
latangle.to_string(**latargs))]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def is_equivalent_frame(self, other):
"""
        Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
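        Examples
        --------
        A minimal sketch (ICRS and FK5 are distinct frames, so this returns
        `False`)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
            >>> c2 = SkyCoord(3*u.deg, 4*u.deg, frame='fk5')
            >>> c1.is_equivalent_frame(c2)  # doctest: +SKIP
            False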
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if np.any(getattr(self, fattrnm) != getattr(other, fattrnm)):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't frame-like")
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
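        Examples
        --------
        A minimal sketch (the coordinates are illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c1 = SkyCoord(0*u.deg, 0*u.deg)
            >>> c2 = SkyCoord(1*u.deg, 0*u.deg)
            >>> c1.separation(c2)  # doctest: +SKIP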
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
            If this or the other coordinate does not have distances.
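        Examples
        --------
        A minimal sketch, assuming both coordinates carry distances (values
        are illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c1 = SkyCoord(0*u.deg, 0*u.deg, distance=1*u.kpc)
            >>> c2 = SkyCoord(0*u.deg, 0*u.deg, distance=2*u.kpc)
            >>> c1.separation_3d(c2)  # doctest: +SKIP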
"""
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction (i.e., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction (i.e., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation : for the *total* angular offset (not broken out into components).
position_angle : for the direction of the offset.
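        Examples
        --------
        A minimal sketch with both coordinates in the same (default ICRS)
        frame; the values are illustrative::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c1 = SkyCoord(10*u.deg, 20*u.deg)
            >>> c2 = SkyCoord(11*u.deg, 21*u.deg)
            >>> dlon, dlat = c1.spherical_offsets_to(c2)  # doctest: +SKIP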
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!')
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
        position_angle : `~astropy.coordinates.Angle`
            The position angle of the offset.
        separation : `~astropy.coordinates.Angle`
            The angular separation of the offset.
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
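        Examples
        --------
        A minimal sketch (the values are illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> c = SkyCoord(10*u.deg, 20*u.deg)
            >>> c.directional_offset_by(45*u.deg, 1*u.deg)  # doctest: +SKIP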
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat,
posang=position_angle, distance=separation)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
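        Examples
        --------
        A minimal sketch of matching a small target list against a catalog
        (the values are illustrative; requires SciPy)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> targets = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
            >>> catalog = SkyCoord([1.1, 2.1, 5]*u.deg, [3, 4, 6]*u.deg)
            >>> idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)  # doctest: +SKIP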
"""
from .matching import match_coordinates_sky
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_sky(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_sky')
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
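        Examples
        --------
        A minimal sketch, assuming both objects carry distances (the values
        are illustrative; requires SciPy)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> targets = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, distance=[1, 2]*u.kpc)
            >>> catalog = SkyCoord([1.1, 2.1, 5]*u.deg, [3, 4, 6]*u.deg,
            ...                    distance=[1, 2, 3]*u.kpc)
            >>> idx, sep2d, dist3d = targets.match_to_catalog_3d(catalog)  # doctest: +SKIP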
"""
from .matching import match_coordinates_3d
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_3d(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_3d')
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` with angle units
The on-sky separation to search within.
Returns
-------
idxsearcharound : integer array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : integer array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
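        Examples
        --------
        A minimal sketch (the values are illustrative; requires SciPy)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> coords = SkyCoord([1, 2, 3]*u.deg, [4, 5, 6]*u.deg)
            >>> centers = SkyCoord([1.1, 2.1]*u.deg, [4, 5]*u.deg)
            >>> idxc, idxself, sep2d, dist3d = coords.search_around_sky(
            ...     centers, 0.5*u.deg)  # doctest: +SKIP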
"""
from .matching import search_around_sky
return search_around_sky(searcharoundcoords, self, seplimit,
storekdtree='_kdtree_sky')
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` with distance units
The physical radius to search within.
Returns
-------
idxsearcharound : integer array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : integer array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
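        Examples
        --------
        A minimal sketch, assuming both objects carry distances (the values
        are illustrative; requires SciPy)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> coords = SkyCoord([1, 2]*u.deg, [4, 5]*u.deg, distance=[1, 2]*u.kpc)
            >>> centers = SkyCoord([1.1]*u.deg, [4]*u.deg, distance=[1]*u.kpc)
            >>> idxc, idxself, sep2d, dist3d = coords.search_around_3d(
            ...     centers, 0.5*u.kpc)  # doctest: +SKIP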
"""
from .matching import search_around_3d
return search_around_3d(searcharoundcoords, self, distlimit,
storekdtree='_kdtree_3d')
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get position_angle to another '
'SkyCoord or a coordinate frame with data')
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
        Parameters
        ----------
        rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
            The final rotation of the frame about the ``origin``. The sign of
            the rotation is the left-hand rule. That is, an object at a
            particular position angle in the un-rotated system will be sent to
            the positive latitude (z) direction in the final frame.
        Returns
        -------
        astrframe : `~astropy.coordinates.SkyOffsetFrame`
            A sky offset frame of the same type as this `SkyCoord` (e.g., if
            this object has an ICRS coordinate, the resulting frame is
            SkyOffsetICRS, with the origin set to this object).
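        Examples
        --------
        A minimal sketch of using the offset frame centered on this coordinate
        (the values are illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> center = SkyCoord(10*u.deg, 20*u.deg)
            >>> aframe = center.skyoffset_frame()
            >>> SkyCoord(11*u.deg, 21*u.deg).transform_to(aframe)  # doctest: +SKIP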
"""
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
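        Examples
        --------
        A minimal sketch (the coordinate is illustrative)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> SkyCoord(10*u.deg, 20*u.deg).get_constellation()  # doctest: +SKIP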
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. Work around is to drop the velocities.
        # they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm)
for nm in self._extra_frameattr_names}
novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
**extra_frameattrs)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
#return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode='all'):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
"""
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : an instance of this class
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The coordinate to check if it is within the wcs coordinate.
image : array
            Optional. The image associated with the wcs object that the coordinate
is being checked against. If not given the naxis keywords will be used
to determine if the coordinate falls within the wcs footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
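        Examples
        --------
        A hedged sketch, assuming ``w`` is an `~astropy.wcs.WCS` built from a
        FITS header (``w`` is not defined here)::
            >>> import astropy.units as u
            >>> from astropy.coordinates import SkyCoord
            >>> SkyCoord(10*u.deg, 20*u.deg).contained_by(w)  # doctest: +SKIP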
"""
if image is not None:
            ymax, xmax = image.shape
else:
            xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
                x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(self, kind='barycentric', obstime=None,
location=None):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's Surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` with velocity units
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
sc = SkyCoord(1*u.deg, 2*u.deg)
            vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc)
rv = rv + vcorr + rv * vcorr / consts.c
If your target is nearby and/or has finite proper motion you may need to account
        for terms arising from this. See Wright & Eastman (2014) for details.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via ``with`` statement. For example, to use the JPL
ephemeris, do::
sc = SkyCoord(1*u.deg, 2*u.deg)
with coord.solar_system_ephemeris.set('jpl'):
                rv += sc.radial_velocity_correction(obstime=t, location=loc)
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel, get_body_barycentric
# location validation
timeloc = getattr(obstime, 'location', None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError('`location` cannot be in both the '
'passed-in `obstime` and this `SkyCoord` '
'because it is ambiguous which is meant '
'for the radial_velocity_correction.')
elif timeloc is not None:
location = timeloc
else:
raise TypeError('Must provide a `location` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute, as an attribute on '
'the passed in `obstime`, or in the method '
'call.')
elif self.location is not None or timeloc is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`location` argument is passed in and there is '
'also a `location` attribute on this SkyCoord or '
'the passed-in `obstime`.')
# obstime validation
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError('Must provide an `obstime` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute or in the method '
'call.')
elif self.obstime is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`obstime` argument is passed in and it is '
'inconsistent with the `obstime` frame '
'attribute on the SkyCoord')
pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
if kind == 'barycentric':
v_origin_to_earth = v_earth
elif kind == 'heliocentric':
v_sun = get_body_barycentric_posvel('sun', obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError("`kind` argument to radial_velocity_correction must "
"be 'barycentric' or 'heliocentric', but got "
"'{}'".format(kind))
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # Transforming to GCRS is not the correct thing to do here, since we don't
        # want to include aberration (or light deflection); instead, only apply
        # parallax if necessary.
if self.data.__class__ is UnitSphericalRepresentation:
targcart = self.icrs.cartesian
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
icrs_cart = self.icrs.cartesian
targcart = icrs_cart - obs_icrs_cart
targcart /= targcart.norm()
if kind == 'barycentric':
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + targcart.dot(beta_obs)) / (1 + gr/speed_of_light) - 1
return zb * speed_of_light
else:
            # Do a simpler correction that ignores time dilation and gravitational
            # redshift. This is adequate since heliocentric corrections shouldn't
            # be used if cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
        This method matches table columns that start with the case-insensitive
        names of the components of the requested frames, if they are also
followed by a non-alphanumeric character. It will also match columns
that *end* with the component name if a non-alphanumeric character is
*before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``'gall'`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
        are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
        table : `~astropy.table.Table`
The table to load data from.
coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : same as this class
The new `SkyCoord` (or subclass) object.
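        Examples
        --------
        An illustrative sketch; the column names here are hypothetical, and the
        actual matches depend on the requested frame's component names::
            >>> from astropy.table import Table
            >>> tab = Table({'my_ra': [10.] * u.deg, 'my_dec': [20.] * u.deg})
            >>> sc = SkyCoord.guess_from_table(tab)  # doctest: +SKIP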
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs['frame'] = coord_kwargs.get('frame', frame)
comp_kwargs = {}
for comp_name in frame.representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r'(\W|\b|_)'
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
# the final regex ORs together the two patterns
rex = re.compile('(' + starts_with_comp + ')|(' + ends_with_comp + ')',
re.IGNORECASE | re.UNICODE)
for col_name in table.colnames:
if rex.match(col_name):
if comp_name in comp_kwargs:
oldname = comp_kwargs[comp_name].name
msg = ('Found at least two matches for component "{0}"'
': "{1}" and "{2}". Cannot continue with this '
'ambiguity.')
raise ValueError(msg.format(comp_name, oldname, col_name))
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError('Found column "{0}" in table, but it was '
'already provided as "{1}" keyword to '
'guess_from_table function.'.format(v.name, k))
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame='icrs', parse=False):
"""
Given a name, query the CDS name resolver to attempt to retrieve
        coordinate information for that object. The search database, Sesame
        URL, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
        parse : bool
            Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For objects with catalog names that have
            J-coordinates embedded in their names, e.g.
            'CRTS SSS100805 J194428-420209', this may be much faster than a
            Sesame query for the same object name. The coordinates extracted
            in this way may differ from the database coordinates by a few
            deci-arcseconds, so only use this option if you do not need
            sub-arcsecond accuracy for coordinates.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
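        Examples
        --------
        A sketch; resolving a name needs a network connection, so the query is
        skipped here::
            >>> orion = SkyCoord.from_name('M42')  # doctest: +SKIP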
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse)
icrs_sky_coord = cls(icrs_coord)
if frame in ('icrs', icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
|
0ae5e38c89d19b9707fb27181bce213c495aefe5b42ca6bfc21a9282f5a0e66b | """
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import operator
from collections import OrderedDict
import inspect
import warnings
import numpy as np
import astropy.units as u
from .angles import Angle, Longitude, Latitude
from .distances import Distance
from astropy._erfa import ufunc as erfa_ufunc
from astropy.utils import ShapedLikeNDArray, classproperty
from astropy.utils import deprecated_attribute
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import InheritDocstrings
from astropy.utils.compat import NUMPY_LT_1_14
__all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation",
"CartesianRepresentation", "SphericalRepresentation",
"UnitSphericalRepresentation", "RadialRepresentation",
"PhysicsSphericalRepresentation", "CylindricalRepresentation",
"BaseDifferential", "CartesianDifferential",
"BaseSphericalDifferential", "BaseSphericalCosLatDifferential",
"SphericalDifferential", "SphericalCosLatDifferential",
"UnitSphericalDifferential", "UnitSphericalCosLatDifferential",
"RadialDifferential", "CylindricalDifferential",
"PhysicsSphericalDifferential"]
# Module-level dict mapping representation string alias names to classes.
# This is populated by the metaclass init so all representation and differential
# classes get registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
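# For example (illustrative), once the classes below have been defined,
# REPRESENTATION_CLASSES['cartesian'] is CartesianRepresentation and
# DIFFERENTIAL_CLASSES['cartesian'] is CartesianDifferential.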
# a hash for the content of the above two dicts, cached for speed.
_REPRDIFF_HASH = None
def get_reprdiff_cls_hash():
"""
    Returns a hash value that stays the same as long as the
    `REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not
    changed.
"""
global _REPRDIFF_HASH
if _REPRDIFF_HASH is None:
_REPRDIFF_HASH = (hash(tuple(REPRESENTATION_CLASSES.items())) +
hash(tuple(DIFFERENTIAL_CLASSES.items())) )
return _REPRDIFF_HASH
def _invalidate_reprdiff_cls_hash():
global _REPRDIFF_HASH
_REPRDIFF_HASH = None
# recommended_units deprecation message; if the attribute is removed later,
# also remove its use in BaseFrame._get_representation_info.
_recommended_units_deprecation = """
The 'recommended_units' attribute is deprecated since 3.0 and may be removed
in a future version. Its main use, of representing angles in degrees in frames,
is now done automatically in frames. Further overrides are discouraged but can
be done using a frame's ``frame_specific_representation_info``.
"""
def _array2string(values, prefix=''):
# Work around version differences for array2string.
kwargs = {'separator': ', ', 'prefix': prefix}
kwargs['formatter'] = {}
if NUMPY_LT_1_14: # in 1.14, style is no longer used (and deprecated)
kwargs['style'] = repr
return np.array2string(values, **kwargs)
def _combine_xyz(x, y, z, xyz_axis=0):
"""
Combine components ``x``, ``y``, ``z`` into a single Quantity array.
Parameters
----------
x, y, z : `~astropy.units.Quantity`
The individual x, y, and z components.
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``, i.e., using the default of ``0``,
the shape will be ``(3,) + x.shape``.
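    Examples
    --------
    A minimal sketch::
        >>> import astropy.units as u
        >>> xyz = _combine_xyz(1. * u.km, 2. * u.km, 3. * u.km)
        >>> xyz.shape
        (3,)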
"""
    # Convert x, y, z to plain values in the same units (this is very fast
    # for identical units), since np.stack cannot deal with Quantity.
cls = x.__class__
unit = x.unit
x = x.value
y = y.to_value(unit)
z = z.to_value(unit)
xyz = np.stack([x, y, z], axis=xyz_axis)
return cls(xyz, unit=unit, copy=False)
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
attrs = []
for component in components:
try:
attrs.append(args.pop(0) if args else kwargs.pop(component))
except KeyError:
raise TypeError('__init__() missing 1 required positional '
'argument: {0!r}'.format(component))
copy = args.pop(0) if args else kwargs.pop('copy', True)
if args:
raise TypeError('unexpected arguments: {0}'.format(args))
if kwargs:
for component in components:
if component in kwargs:
raise TypeError("__init__() got multiple values for "
"argument {0!r}".format(component))
raise TypeError('unexpected keyword arguments: {0}'.format(kwargs))
# Pass attributes through the required initializing classes.
attrs = [self.attr_classes[component](attr, copy=copy)
for component, attr in zip(components, attrs)]
try:
attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError:
if len(components) <= 2:
c_str = ' and '.join(components)
else:
c_str = ', '.join(components[:2]) + ', and ' + components[2]
raise ValueError("Input parameters {0} cannot be broadcast"
.format(c_str))
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, '_' + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
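        For example::
            >>> SphericalRepresentation.get_name()
            'spherical'
            >>> CartesianDifferential.get_name()
            'cartesian'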
"""
name = cls.__name__.lower()
if name.endswith('representation'):
name = name[:-14]
elif name.endswith('differential'):
name = name[:-12]
return name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : object of this class
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Returns
-------
cartrepr : `CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
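        Examples
        --------
        A sketch of typical use, via a shape-changing method defined on
        `~astropy.utils.misc.ShapedLikeNDArray`::
            >>> rep = CartesianRepresentation([1., 2.] * u.kpc,
            ...                               [3., 4.] * u.kpc,
            ...                               [5., 6.] * u.kpc)
            >>> rep.reshape(2, 1).shape  # calls _apply('reshape', 2, 1)
            (2, 1)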
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, '_' + component,
apply_method(getattr(self, component)))
return new
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
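        Examples
        --------
        A sketch; in-place reshaping requires contiguous component data::
            >>> rep = CartesianRepresentation(np.zeros((3, 4)) * u.m)
            >>> rep.shape = (2, 2)
            >>> rep.x.shape
            (2, 2)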
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __div__(self, other): # pragma: py2
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return dict([(component, getattr(self, component).unit)
for component in self.components])
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = '({0})'.format(
', '.join([self._units[component].to_string()
for component in self.components]))
return unitstr
def __str__(self):
return '{0} {1:s}'.format(_array2string(self._values), self._unitstr)
def __repr__(self):
prefixstr = ' '
arrstr = _array2string(self._values, prefix=prefixstr)
diffstr = ''
if getattr(self, 'differentials', None):
diffstr = '\n (has differentials w.r.t.: {0})'.format(
', '.join([repr(key) for key in self.differentials.keys()]))
unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]'
return '<{0} ({1}) {2:s}\n{3}{4}{5}>'.format(
self.__class__.__name__, ', '.join(self.components),
unitstr, prefixstr, arrstr, diffstr)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = '_' + component
def get_component(self):
return getattr(self, component)
return get_component
# Need to also subclass ABCMeta rather than type, so that this meta class can
# be combined with a ShapedLikeNDArray subclass (which is an ABC). Without it:
# "TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases"
class MetaBaseRepresentation(InheritDocstrings, abc.ABCMeta):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Register representation name (except for BaseRepresentation)
if cls.__name__ == 'BaseRepresentation':
return
if 'attr_classes' not in dct:
raise NotImplementedError('Representations must have an '
'"attr_classes" class attribute.')
if 'recommended_units' in dct:
warnings.warn(_recommended_units_deprecation,
AstropyDeprecationWarning)
# Ensure we don't override the property that warns about the
# deprecation, but that the value remains the same.
dct.setdefault('_recommended_units', dct.pop('recommended_units'))
repr_name = cls.get_name()
if repr_name in REPRESENTATION_CLASSES:
raise ValueError("Representation class {0} already defined"
.format(repr_name))
REPRESENTATION_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=("The '{0}' component of the points(s)."
.format(component))))
class BaseRepresentation(BaseRepresentationOrDifferential,
metaclass=MetaBaseRepresentation):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, an `~collections.OrderedDict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
recommended_units = deprecated_attribute('recommended_units', since='3.0')
_recommended_units = {}
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if (isinstance(differentials, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
raise ValueError("To attach a RadialDifferential to a "
"UnitSphericalRepresentation, you must supply "
"a dictionary with an appropriate key.")
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError:
raise TypeError("'differentials' argument must be a "
"dictionary-like object")
diff._check_base(self)
if (isinstance(diff, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError("For differential object '{0}', expected "
"unit key = '{1}' but received key = '{2}'"
.format(repr(diff), expected_key, key))
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
                #       so use a ValueError instead?
raise ValueError("Shape of differentials must be the same "
"as the shape of the representation ({0} vs "
"{1})".format(diff.shape, self.shape))
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError("Operation '{0}' is not supported when "
"differentials are attached to a {1}."
.format(op_name, self.__class__.__name__))
@property
def _compatible_differentials(self):
return [DIFFERENTIAL_CLASSES[self.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError("{} has not implemented unit vectors"
.format(type(self)))
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError("{} has not implemented scale factors."
.format(type(self)))
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this "
"representation!")
elif (len(self.differentials) == 1 and
inspect.isclass(differential_class) and
issubclass(differential_class, BaseDifferential)):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
        elif set(differential_class.keys()) != set(self.differentials.keys()):
            raise ValueError("Desired differential classes must be passed in "
                             "as a dictionary with keys equal to a string "
                             "representation of the unit of the derivative "
                             "for each differential stored with this "
                             "representation object ({0})"
                             .format(self.differentials))
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k],
base=self)
except Exception:
if (differential_class[k] not in
new_rep._compatible_differentials):
raise TypeError("Desired differential class {0} is not "
"compatible with the desired "
"representation class {1}"
.format(differential_class[k],
new_rep.__class__))
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
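        Examples
        --------
        A minimal sketch::
            >>> sph = SphericalRepresentation(lon=10 * u.deg, lat=20 * u.deg,
            ...                               distance=1. * u.kpc)
            >>> sph.represent_as(CartesianRepresentation).get_name()
            'cartesian'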
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError("Input to a representation's represent_as "
"must be a class, not a string. For "
"strings, use frame objects")
if other_class is not self.__class__:
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
else:
new_rep = self
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class)
return new_rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : Sequence of `~astropy.coordinates.BaseDifferential`
The differentials for the new representation to have.
Returns
-------
newrepr
A copy of this representation, but with the ``differentials`` as
its differentials.
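        Examples
        --------
        A sketch attaching a velocity differential, which gets keyed by ``'s'``
        since it is a derivative with respect to time::
            >>> rep = CartesianRepresentation(1, 2, 3, unit=u.kpc)
            >>> dif = CartesianDifferential(4, 5, 6, unit=u.km / u.s)
            >>> 's' in rep.with_differentials(dif).differentials
            True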
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(*args, differentials=self.differentials.copy(),
copy=False)
new_rep._differentials.update(
new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
newrepr
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
"""
return representation.represent_as(cls)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = dict(
[(k, diff._apply(method, *args, **kwargs))
for k, diff in self._differentials.items()])
return rep
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
self._raise_if_has_differentials(op.__name__)
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
return self.__class__(*results)
except Exception:
return NotImplemented
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
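        Examples
        --------
        A sketch using a 3-4-5 triangle::
            >>> CartesianRepresentation(3., 4., 0., unit=u.km).norm()  # doctest: +FLOAT_CMP
            <Quantity 5. km>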
"""
return np.sqrt(functools.reduce(
operator.add, (getattr(self, component)**2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle))))
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : representation
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials('mean')
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : representation
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials('sum')
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
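        Examples
        --------
        A sketch with two perpendicular unit vectors::
            >>> u1 = UnitSphericalRepresentation(0 * u.deg, 0 * u.deg)
            >>> u2 = UnitSphericalRepresentation(90 * u.deg, 0 * u.deg)
            >>> abs(u1.dot(u2)) < 1e-10
            True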
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : representation
The representation to take the cross product with.
Returns
-------
cross_product : representation
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials('cross')
return self.from_cartesian(self.to_cartesian().cross(other))
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
        have different shapes, they should be broadcastable. If not a Quantity,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : `~astropy.units.Unit` or str
If given, the coordinates will be converted to this unit (or taken to
        be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`CartesianDifferential` instance, or a dictionary of
`CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
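    Examples
    --------
    A sketch of single-array input, with the components stored along the
    first axis::
        >>> import numpy as np
        >>> rep = CartesianRepresentation(np.arange(6.).reshape(3, 2), unit=u.m)
        >>> rep.y.shape
        (2,)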
"""
attr_classes = OrderedDict([('x', u.Quantity),
('y', u.Quantity),
('z', u.Quantity)])
_xyz = None
def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None,
differentials=None, copy=True):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if x, y, and z are "
"in a single array passed in through x, "
"i.e., y and z should not be not given.")
if y is None or z is None:
raise ValueError("x, y, and z are required to instantiate {0}"
.format(self.__class__.__name__))
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (self._x.unit.is_equivalent(self._y.unit) and
self._x.unit.is_equivalent(self._z.unit)):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
o = np.broadcast_to(0.*u.one, self.shape, subok=True)
return OrderedDict(
(('x', CartesianRepresentation(l, o, o, copy=False)),
('y', CartesianRepresentation(o, l, o, copy=False)),
('z', CartesianRepresentation(o, o, l, copy=False))))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('x', l), ('y', l), ('z', l)))
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
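        Examples
        --------
        A minimal sketch::
            >>> rep = CartesianRepresentation([1., 2.] * u.m, [3., 4.] * u.m,
            ...                               [5., 6.] * u.m)
            >>> rep.get_xyz(xyz_axis=-1).shape
            (2, 3)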
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return _combine_xyz(self._x, self._y, self._z, xyz_axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : `~numpy.ndarray`
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# Handle differentials attached to this representation
if self.differentials:
# TODO: speed this up going via d.d_xyz.
new_diffs = dict(
(k, d.from_cartesian(d.to_cartesian().transform(matrix)))
for k, d in self.differentials.items())
else:
new_diffs = None
return self.__class__(p, xyz_axis=-1, copy=False, differentials=new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = ((self, other_c) if not reverse else
(other_c, self))
return self.__class__(*(op(getattr(first, component),
getattr(second, component))
for component in first.components))
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._apply('mean', *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._apply('sum', *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : representation
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
"""
try:
other_c = other.to_cartesian()
except Exception:
raise TypeError("cannot only take dot product with another "
"representation, not a {0} instance."
.format(type(other)))
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1),
other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : representation
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
try:
other_c = other.to_cartesian()
except Exception:
raise TypeError("cannot only take cross product with another "
"representation, not a {0} instance."
.format(type(other)))
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1),
other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
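    Examples
    --------
    A minimal sketch::
        >>> usph = UnitSphericalRepresentation(10 * u.deg, 20 * u.deg)
        >>> usph.norm()  # doctest: +FLOAT_CMP
        <Quantity 1.>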
"""
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude)])
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@property
def _compatible_differentials(self):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return OrderedDict(
(('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False))))
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1./u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return OrderedDict((('lon', sf_lon),
('lat', sf_lat)))
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# NUMPY_LT_1_16 cannot create a vector automatically
p = u.Quantity(np.empty(self.shape + (3,)), u.dimensionless_unscaled,
copy=False)
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat, p)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat, r=1.0,
copy=False)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0,
copy=False)
return super().represent_as(other_class, differential_class)
def __mul__(self, other):
self._raise_if_has_differentials('multiplication')
return self._dimensional_representation(lon=self.lon, lat=self.lat,
distance=1. * other)
def __truediv__(self, other):
self._raise_if_has_differentials('division')
return self._dimensional_representation(lon=self.lon, lat=self.lat,
distance=1. / other)
def __neg__(self):
self._raise_if_has_differentials('negation')
return self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity`
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs))
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : representation
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other))
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
    It can do little else but be used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity`
The distance of the point(s) from the origin.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('distance', u.Quantity)])
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, copy=copy, differentials=differentials)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError('Cartesian unit vectors are undefined for '
'{0} instances'.format(self.__class__))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('distance', l),))
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError('cannot convert {0} instance to cartesian.'
.format(self.__class__))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def _scale_operation(self, op, *args):
self._raise_if_has_differentials(op.__name__)
return op(self.distance, *args)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity`
            The distance of the point(s) from the origin, with the same
            shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity`
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('distance', u.Quantity)])
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat, distance, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy,
differentials=differentials)
if self._distance.unit.physical_type == 'length':
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith('Distance must be >= 0'):
raise ValueError("Distance must be >= 0. To allow negative "
"distance values, you must explicitly pass"
" in a `Distance` object with the the "
"argument 'allow_negative=True'.")
else:
raise
@property
def _compatible_differentials(self):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return OrderedDict(
(('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False)),
('distance', CartesianRepresentation(coslat*coslon, coslat*sinlon,
sinlat, copy=False))))
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('lon', sf_lon),
('lat', sf_lat),
('distance', sf_distance)))
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
r=self.distance, copy=False)
elif issubclass(other_class, UnitSphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, copy=False)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# NUMPY_LT_1_16 cannot create a vector automatically
p = u.Quantity(np.empty(self.shape + (3,)), d.unit, copy=False)
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d, p)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def __neg__(self):
self._raise_if_has_differentials('negation')
return self.__class__(self.lon + 180. * u.deg, -self.lat, self.distance,
copy=False)
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`. If ``copy`` is `False`,
        ``phi`` will be changed in place if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
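    Examples
    --------
    A minimal construction sketch (values purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import PhysicsSphericalRepresentation
        >>> p = PhysicsSphericalRepresentation(phi=30. * u.deg,
        ...                                    theta=60. * u.deg, r=1. * u.kpc)
        >>> cart = p.to_cartesian()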
"""
attr_classes = OrderedDict([('phi', Angle),
('theta', Angle),
('r', u.Quantity)])
def __init__(self, phi, theta, r, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {0}'.format(theta.to(u.degree)))
if self._r.unit.physical_type == 'length':
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
        The inclination (angle from the pole) of the point(s).
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return OrderedDict(
(('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)),
('theta', CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False)),
('r', CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
costheta, copy=False))))
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('phi', r * sintheta),
('theta', r),
('r', l)))
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
distance=self.r)
elif issubclass(other_class, UnitSphericalRepresentation):
return other_class(lon=self.phi, lat=90 * u.deg - self.theta)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
        to an angle between 0 and 360 degrees. This can also be an instance
        of `~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
        The z coordinate(s) of the point(s).
differentials : dict, `CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
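    Examples
    --------
    A minimal construction sketch (values purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import CylindricalRepresentation
        >>> c = CylindricalRepresentation(rho=1. * u.pc, phi=45. * u.deg,
        ...                               z=0.5 * u.pc)
        >>> cart = c.to_cartesian()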
"""
attr_classes = OrderedDict([('rho', u.Quantity),
('phi', Angle),
('z', u.Quantity)])
def __init__(self, rho, phi, z, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1., self.shape)
return OrderedDict(
(('rho', CartesianRepresentation(cosphi, sinphi, 0, copy=False)),
('phi', CartesianRepresentation(-sinphi, cosphi, 0, copy=False)),
('z', CartesianRepresentation(0, 0, l, unit=u.one, copy=False))))
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('rho', l),
('phi', rho),
('z', l)))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
class MetaBaseDifferential(InheritDocstrings, abc.ABCMeta):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Don't do anything for base helper classes.
if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential',
'BaseSphericalCosLatDifferential'):
return
if 'base_representation' not in dct:
            raise NotImplementedError('Differential representations must have a '
                                      '"base_representation" class attribute.')
# If not defined explicitly, create attr_classes.
if not hasattr(cls, 'attr_classes'):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = OrderedDict([('d_' + c, u.Quantity)
for c in base_attr_classes])
if 'recommended_units' in dct:
warnings.warn(_recommended_units_deprecation,
AstropyDeprecationWarning)
# Ensure we don't override the property that warns about the
# deprecation, but that the value remains the same.
dct.setdefault('_recommended_units', dct.pop('recommended_units'))
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError("Differential class {0} already defined"
.format(repr_name))
DIFFERENTIAL_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=("Component '{0}' of the Differential."
.format(component))))
class BaseDifferential(BaseRepresentationOrDifferential,
metaclass=MetaBaseDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
Notes
-----
All differential representation classes should subclass this base class,
    and define a ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
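    As a sketch (the subclass name here is purely illustrative), a new
    differential class needs little more than its base representation::
        >>> from astropy.coordinates import BaseDifferential, CylindricalRepresentation
        >>> class MyCylindricalDifferential(BaseDifferential):
        ...     base_representation = CylindricalRepresentation
        >>> list(MyCylindricalDifferential.attr_classes)
        ['d_rho', 'd_phi', 'd_z']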
"""
recommended_units = deprecated_attribute('recommended_units', since='3.0')
_recommended_units = {}
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError("Differential class {0} is not compatible with the "
"base (representation) class {1}"
.format(cls, base.__class__))
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, 'd_{0}'.format(name), None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
else:
raise RuntimeError("Invalid representation-differential units! This"
" likely happened because either the "
"representation or the associated differential "
"have non-standard units. Check that the input "
"positional data have positional units, and the "
"input velocity data have velocity units, or "
"are both dimensionless.")
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
        This object, converted to a `CartesianRepresentation` by combining
        the components with the base's unit vectors and scale factors.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)))
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
        other : `CartesianRepresentation`
The object to convert into this differential.
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
        A new differential object that is an instance of this class.
"""
base_e, base_sf = cls._get_base_vectors(base)
return cls(*(other.dot(e / base_sf[component])
for component, e in base_e.items()), copy=False)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
        base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
base = base.represent_as(other_class.base_representation)
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation))
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def _scale_operation(self, op, *args):
"""Scale all components.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(*[op(getattr(first, c), getattr(second, c))
for c in self.components])
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but cartesian differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
        If given, the differentials will be converted to this unit (or taken
        to be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
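    Examples
    --------
    A minimal usage sketch (values purely illustrative), with both the
    separate-component and single-array forms::
        >>> import astropy.units as u
        >>> from astropy.coordinates import CartesianDifferential
        >>> d = CartesianDifferential(1., 2., 3., unit=u.km / u.s)
        >>> d2 = CartesianDifferential([1., 2., 3.] * u.km / u.s)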
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None,
copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
"are in a single array passed in through d_x, "
"i.e., d_y and d_z should not be not given.")
if d_y is None or d_z is None:
raise ValueError("d_x, d_y, and d_z are required to instantiate {0}"
.format(self.__class__.__name__))
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
self._d_x.unit.is_equivalent(self._d_z.unit)):
raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c
in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return _combine_xyz(self._d_x, self._d_y, self._d_z, xyz_axis=xyz_axis)
d_xyz = property(get_d_xyz)
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
        The differential longitude and latitude.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
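    Examples
    --------
    A minimal usage sketch (values purely illustrative); note that converting
    to Cartesian needs a base representation to supply the scale::
        >>> import astropy.units as u
        >>> from astropy.coordinates import (UnitSphericalDifferential,
        ...                                  SphericalRepresentation)
        >>> d = UnitSphericalDifferential(d_lon=1. * u.mas / u.yr,
        ...                               d_lat=2. * u.mas / u.yr)
        >>> base = SphericalRepresentation(10. * u.deg, 20. * u.deg, 1. * u.kpc)
        >>> v = d.to_cartesian(base)  # scaled by base.distance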
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(representation, (SphericalCosLatDifferential,
UnitSphericalCosLatDifferential)):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
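    Examples
    --------
    A minimal usage sketch (values purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import SphericalDifferential
        >>> d = SphericalDifferential(d_lon=1. * u.mas / u.yr,
        ...                           d_lat=2. * u.mas / u.yr,
        ...                           d_distance=3. * u.km / u.s)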
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat, d_distance, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat,
self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differentials from points on a spherical base representation.
    Here, cos(lat) is assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalCosLatDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
        The differential longitude (with the cos(lat) factor included) and
        latitude.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = UnitSphericalRepresentation
attr_classes = OrderedDict([('d_lon_coslat', u.Quantity),
('d_lat', u.Quantity)])
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
        # though the versions without cos(lat) need the base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(representation, (SphericalDifferential,
UnitSphericalDifferential)):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
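    Examples
    --------
    A minimal usage sketch (values purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import SphericalCosLatDifferential
        >>> d = SphericalCosLatDifferential(d_lon_coslat=1. * u.mas / u.yr,
        ...                                 d_lat=2. * u.mas / u.yr,
        ...                                 d_distance=3. * u.km / u.s)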
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = OrderedDict([('d_lon_coslat', u.Quantity),
('d_lat', u.Quantity),
('d_distance', u.Quantity)])
def __init__(self, d_lon_coslat, d_lat, d_distance, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat,
representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
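    Examples
    --------
    A minimal usage sketch (value purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import RadialDifferential
        >>> d = RadialDifferential(d_distance=10. * u.km / u.s)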
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
return self.d_distance * base.represent_as(
UnitSphericalRepresentation).to_cartesian()
@classmethod
def from_cartesian(cls, other, base):
return cls(other.dot(base.represent_as(UnitSphericalRepresentation)),
copy=False)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(representation, (SphericalDifferential,
SphericalCosLatDifferential)):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(other, (BaseSphericalDifferential,
BaseSphericalCosLatDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
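    Examples
    --------
    A minimal usage sketch (values purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import PhysicsSphericalDifferential
        >>> d = PhysicsSphericalDifferential(d_phi=1. * u.mas / u.yr,
        ...                                  d_theta=2. * u.mas / u.yr,
        ...                                  d_r=3. * u.km / u.s)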
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta, d_r, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError('d_phi and d_theta should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, -representation.d_lat,
representation.d_distance)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity`
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity`
The differential azimuth.
d_z : `~astropy.units.Quantity`
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
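    Examples
    --------
    A minimal usage sketch (values purely illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import CylindricalDifferential
        >>> d = CylindricalDifferential(d_rho=1. * u.km / u.s,
        ...                             d_phi=2. * u.mas / u.yr,
        ...                             d_z=3. * u.km / u.s)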
"""
base_representation = CylindricalRepresentation
    def __init__(self, d_rho, d_phi, d_z, copy=True):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for getting a coordinate object
for a named object by querying SESAME and getting the first returned result.
Note that this is intended to be a convenience, and is very simple. If you
need precise coordinates for an object you should find the appropriate
reference for that measurement and input the coordinates manually.
"""
# Standard library
import os
import re
import socket
import urllib.request
import urllib.parse
import urllib.error
# Astropy
from astropy import units as u
from .sky_coordinate import SkyCoord
from astropy.utils import data
from astropy.utils.state import ScienceState
__all__ = ["get_icrs_coordinates"]
class sesame_url(ScienceState):
"""
The URL(s) to Sesame's web-queryable database.
"""
_value = ["http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/",
"http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/"]
@classmethod
def validate(cls, value):
# TODO: Implement me
return value
class sesame_database(ScienceState):
"""
This specifies the default database that SESAME will query when
using the name resolve mechanism in the coordinates
subpackage. Default is to search all databases, but this can be
'all', 'simbad', 'ned', or 'vizier'.
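    A usage sketch; inside the block, name lookups query only SIMBAD::
        >>> from astropy.coordinates import name_resolve
        >>> with name_resolve.sesame_database.set('simbad'):
        ...     pass  # calls to get_icrs_coordinates here would use SIMBAD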
"""
_value = 'all'
@classmethod
def validate(cls, value):
if value not in ['all', 'simbad', 'ned', 'vizier']:
raise ValueError("Unknown database '{0}'".format(value))
return value
class NameResolveError(Exception):
pass
def _parse_response(resp_data):
"""
Given a string response from SESAME, parse out the coordinates by looking
for a line starting with a J, meaning ICRS J2000 coordinates.
Parameters
----------
resp_data : str
The string HTTP response from SESAME.
Returns
-------
ra : str
The string Right Ascension parsed from the HTTP response.
dec : str
The string Declination parsed from the HTTP response.
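    Examples
    --------
    A sketch with an illustrative (not live) response fragment::
        >>> _parse_response(b"%J 083.82208 -05.39111 = M 42")
        ('083.82208', '-05.39111')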
"""
pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
matched = pattr.search(resp_data.decode('utf-8'))
if matched is None:
return None, None
else:
ra, dec = matched.groups()
return ra, dec
def get_icrs_coordinates(name, parse=False):
"""
Retrieve an ICRS object by using an online name resolving service to
retrieve coordinates for the specified name. By default, this will
search all available databases until a match is found. If you would like
to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier. The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving
up.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
    parse : bool
Whether to attempt extracting the coordinates from the name by
        parsing with a regex. For object catalog names that have
        J-coordinates embedded in them, e.g.
        'CRTS SSS100805 J194428-420209', this may be much faster than a
sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
Returns
-------
coord : `astropy.coordinates.ICRS` object
The object's coordinates in the ICRS frame.
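    Examples
    --------
    A usage sketch (requires network access, so the doctest is skipped)::
        >>> from astropy.coordinates import get_icrs_coordinates
        >>> orion = get_icrs_coordinates('M42')  # doctest: +SKIP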
"""
# if requested, first try extract coordinates embedded in the object name.
# Do this first since it may be much faster than doing the sesame query
if parse:
from . import jparser
if jparser.search(name):
return jparser.to_skycoord(name)
else:
# if the parser failed, fall back to sesame query.
pass
# maybe emit a warning instead of silently falling back to sesame?
database = sesame_database.get()
# The web API just takes the first letter of the database name
db = database.upper()[0]
# Make sure we don't have duplicates in the url list
urls = []
domains = []
for url in sesame_url.get():
domain = urllib.parse.urlparse(url).netloc
# Check for duplicates
if domain not in domains:
domains.append(domain)
# Add the query to the end of the url, add to url list
fmt_url = os.path.join(url, "{db}?{name}")
fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
urls.append(fmt_url)
exceptions = []
for url in urls:
try:
# Retrieve ascii name resolve data from CDS
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = resp.read()
break
except urllib.error.URLError as e:
exceptions.append(e)
continue
except socket.timeout as e:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
e.reason = "Request took longer than the allowed {:.1f} " \
"seconds".format(data.conf.remote_timeout)
exceptions.append(e)
continue
    # All Sesame URLs failed...
else:
messages = ["{url}: {e.reason}".format(url=url, e=e)
for url, e in zip(urls, exceptions)]
raise NameResolveError("All Sesame queries failed. Unable to "
"retrieve coordinates. See errors per URL "
"below: \n {}".format("\n".join(messages)))
ra, dec = _parse_response(resp_data)
if ra is None and dec is None:
if db == "A":
err = "Unable to find coordinates for name '{0}'".format(name)
else:
err = "Unable to find coordinates for name '{0}' in database {1}"\
.format(name, database)
raise NameResolveError(err)
# Return SkyCoord object
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
return sc