Test add_array utility function.
Test by adding an array of ones to an array of zeros. | def test_add_array_equal_shape():
"""
Test add_array utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((11, 11))
large_test_array_ref = large_test_array.copy()
large_test_array_ref += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref) |
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values. | def test_subpixel_indices(position, subpixel_index):
"""
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values.
"""
assert np.all(subpixel_indices(position, subsampling) == subpixel_index) |
This main function is executed by the ``samp_hub`` command line tool. | def hub_script(timeout=0):
"""
This main function is executed by the ``samp_hub`` command line tool.
"""
parser = argparse.ArgumentParser(prog="samp_hub " + __version__)
parser.add_argument(
"-k", "--secret", dest="secret", metavar="CODE", help="custom secret code."
)
parser.add_argument(
"-d", "--addr", dest="addr", metavar="ADDR", help="listening address (or IP)."
)
parser.add_argument(
"-p",
"--port",
dest="port",
metavar="PORT",
type=int,
help="listening port number.",
)
parser.add_argument(
"-f", "--lockfile", dest="lockfile", metavar="FILE", help="custom lockfile."
)
parser.add_argument(
"-w",
"--no-web-profile",
dest="web_profile",
action="store_false",
help="run the Hub disabling the Web Profile.",
default=True,
)
parser.add_argument(
"-P",
"--pool-size",
dest="pool_size",
metavar="SIZE",
type=int,
help="the socket connections pool size.",
default=20,
)
timeout_group = parser.add_argument_group(
"Timeout group",
"Options to set the Hub and client inactivity timeouts, i.e. the "
"inactivity time interval after which the Hub shuts down or "
"unregisters a client. A notification of the samp.hub.disconnect "
"MType is sent to any client that is forcibly unregistered because "
"its timeout expired.",
)
timeout_group.add_argument(
"-t",
"--timeout",
dest="timeout",
metavar="SECONDS",
help=(
"set the Hub inactivity timeout in SECONDS. The default is 0, "
"meaning the Hub never expires."
),
type=int,
default=0,
)
timeout_group.add_argument(
"-c",
"--client-timeout",
dest="client_timeout",
metavar="SECONDS",
help=(
"set the client inactivity timeout in SECONDS. The default is 0, "
"meaning the client never expires."
),
type=int,
default=0,
)
parser.add_argument_group(timeout_group)
log_group = parser.add_argument_group(
"Logging options",
"Additional options to customize the logging output. By default the "
"SAMP Hub prints INFO-level log messages to the standard output and "
"standard error devices. The options below change the logging level "
"and redirect the log messages to an output file.",
)
log_group.add_argument(
"-L",
"--log-level",
dest="loglevel",
metavar="LEVEL",
help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
type=str,
choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"],
default="INFO",
)
log_group.add_argument(
"-O",
"--log-output",
dest="logout",
metavar="FILE",
help="set the output file for the log messages.",
default="",
)
parser.add_argument_group(log_group)
adv_group = parser.add_argument_group(
"Advanced group",
"Advanced options for administrative tasks and for non-standard Hub "
"behaviors. The --label option assigns a value to the hub.label token, "
"which gives the Hub instance a name. The --multi option starts the "
"Hub in multi-instance mode, a non-standard behavior that allows "
"several Hubs to run at the same time. Multi-instance Hubs place "
"their non-standard lock-files in the <home directory>/.samp-1 "
"directory, using file names of the form samp-hub-<PID>-<ID>, where "
"PID is the Hub process ID and ID is an internal integer ID.",
)
adv_group.add_argument(
"-l",
"--label",
dest="label",
metavar="LABEL",
help="assign a LABEL to the Hub.",
default="",
)
adv_group.add_argument(
"-m",
"--multi",
dest="mode",
help=(
"run the Hub in multi-instance mode generating a custom "
"lockfile with a random name."
),
action="store_const",
const="multiple",
default="single",
)
parser.add_argument_group(adv_group)
options = parser.parse_args()
try:
if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
log.setLevel(options.loglevel)
if options.logout != "":
context = log.log_to_file(options.logout)
else:
class dummy_context:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
context = dummy_context()
with context:
args = copy.deepcopy(options.__dict__)
del args["loglevel"]
del args["logout"]
hub = SAMPHubServer(**args)
hub.start(False)
if not timeout:
while hub.is_running:
time.sleep(0.01)
else:
time.sleep(timeout)
hub.stop()
except KeyboardInterrupt:
try:
hub.stop()
except NameError:
pass
except OSError as e:
print(f"[SAMP] Error: I/O error({e.errno}): {e.strerror}")
sys.exit(1)
except SystemExit:
pass |
Read in the lockfile given by ``lockfilename`` into a dictionary. | def read_lockfile(lockfilename):
"""
Read in the lockfile given by ``lockfilename`` into a dictionary.
"""
# lockfilename may be a local file or a remote URL, but
# get_readable_fileobj takes care of this.
lockfiledict = {}
with get_readable_fileobj(lockfilename) as f:
for line in f:
if not line.startswith("#"):
kw, val = line.split("=")
lockfiledict[kw.strip()] = val.strip()
return lockfiledict |
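A minimal usage sketch (not part of the astropy sources; the lockfile contents below are hypothetical). It writes a small standard-profile lockfile to a temporary file and reads it back with the function above:
>>> import os, tempfile
>>> tmp = tempfile.NamedTemporaryFile("w", suffix=".samp", delete=False)
>>> _ = tmp.write("# SAMP lockfile\nsamp.secret=02ab4d\n"
...               "samp.hub.xmlrpc.url=http://127.0.0.1:40303/\n"
...               "samp.profile.version=1.3\n")
>>> tmp.close()
>>> read_lockfile(tmp.name)["samp.profile.version"]
'1.3'
>>> os.remove(tmp.name)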
Get either the hub given by the environment variable SAMP_HUB, or the one
given by the lockfile .samp in the user home directory. | def get_main_running_hub():
"""
Get either the hub given by the environment variable SAMP_HUB, or the one
given by the lockfile .samp in the user home directory.
"""
hubs = get_running_hubs()
if not hubs:
raise SAMPHubError("Unable to find a running SAMP Hub.")
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
# For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :]
else:
raise SAMPHubError("SAMP Hub profile not supported.")
else:
lockfilename = os.path.join(_find_home(), ".samp")
return hubs[lockfilename] |
Return a dictionary containing the lock-file contents of all the currently
running hubs (single and/or multiple mode).
The dictionary format is:
``{<lock-file>: {<token-name>: <token-string>, ...}, ...}``
where ``{<lock-file>}`` is the lock-file name, ``{<token-name>}`` and
``{<token-string>}`` are the lock-file tokens (name and content).
Returns
-------
running_hubs : dict
Lock-file contents of all the currently running hubs. | def get_running_hubs():
"""
Return a dictionary containing the lock-file contents of all the currently
running hubs (single and/or multiple mode).
The dictionary format is:
``{<lock-file>: {<token-name>: <token-string>, ...}, ...}``
where ``{<lock-file>}`` is the lock-file name, ``{<token-name>}`` and
``{<token-string>}`` are the lock-file tokens (name and content).
Returns
-------
running_hubs : dict
Lock-file contents of all the currently running hubs.
"""
hubs = {}
lockfilename = ""
# HUB SINGLE INSTANCE MODE
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
# For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :]
else:
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
# HUB MULTIPLE INSTANCE MODE
lockfiledir = ""
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith("samp-hub"):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
return hubs |
Test whether a hub identified by ``lockfilename`` is running or not.
Parameters
----------
lockfilename : str
Lock-file name (path + file name) of the Hub to be tested.
Returns
-------
is_running : bool
Whether the hub is running
hub_params : dict
If the hub is running this contains the parameters from the lockfile | def check_running_hub(lockfilename):
"""
Test whether a hub identified by ``lockfilename`` is running or not.
Parameters
----------
lockfilename : str
Lock-file name (path + file name) of the Hub to be tested.
Returns
-------
is_running : bool
Whether the hub is running
hub_params : dict
If the hub is running this contains the parameters from the lockfile
"""
is_running = False
lockfiledict = {}
# Check whether a lockfile already exists
try:
lockfiledict = read_lockfile(lockfilename)
except OSError:
return is_running, lockfiledict
if "samp.hub.xmlrpc.url" in lockfiledict:
try:
proxy = xmlrpc.ServerProxy(
lockfiledict["samp.hub.xmlrpc.url"].replace("\\", ""), allow_none=1
)
proxy.samp.hub.ping()
is_running = True
except xmlrpc.ProtocolError:
# There is a protocol error (e.g. for authentication required),
# but the server is alive
is_running = True
except OSError:
pass
return is_running, lockfiledict |
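An illustrative sketch of probing for a Hub (assumptions: the default single-instance lockfile is used, and ``_find_home`` is the same internal helper called by ``get_running_hubs`` above):
import os

lockfile = os.path.join(_find_home(), ".samp")
is_running, params = check_running_hub(lockfile)
if is_running:
    print("Hub alive at", params["samp.hub.xmlrpc.url"])
else:
    print("No running Hub registered in", lockfile)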
Get attributes recursively. | def getattr_recursive(variable, attribute):
"""
Get attributes recursively.
"""
if "." in attribute:
top, remaining = attribute.split(".", 1)
return getattr_recursive(getattr(variable, top), remaining)
else:
return getattr(variable, attribute) |
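A doctest-style illustration (the classes are hypothetical, defined only for the example):
>>> class Inner: value = 42
>>> class Outer: inner = Inner()
>>> getattr_recursive(Outer(), "inner.value")
42
>>> getattr_recursive(3 + 4j, "imag")   # no dot: plain getattr
4.0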
Find the number of arguments a function or method takes (excluding ``self``). | def get_num_args(f):
"""
Find the number of arguments a function or method takes (excluding ``self``).
"""
if inspect.ismethod(f):
return f.__func__.__code__.co_argcount - 1
elif inspect.isfunction(f):
return f.__code__.co_argcount
else:
raise TypeError("f should be a function or a method") |
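A doctest-style illustration (hypothetical function and class):
>>> def add(a, b):
...     return a + b
>>> get_num_args(add)
2
>>> class Adder:
...     def combine(self, a, b):
...         return a + b
>>> get_num_args(Adder().combine)   # bound method: ``self`` is excluded
2
>>> get_num_args(42)
Traceback (most recent call last):
    ...
TypeError: f should be a function or a method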
Test that SAMPHubProxy can be instantiated | def test_SAMPHubProxy():
"""Test that SAMPHubProxy can be instantiated"""
SAMPHubProxy() |
Test that SAMPClient can be instantiated | def test_SAMPClient():
"""Test that SAMPClient can be instantiated"""
proxy = SAMPHubProxy()
SAMPClient(proxy) |
Test that SAMPIntegratedClient can be instantiated | def test_SAMPIntegratedClient():
"""Test that SAMPIntegratedClient can be instantiated"""
SAMPIntegratedClient() |
A fixture that can be used by client tests that require a HUB. | def samp_hub():
"""A fixture that can be used by client tests that require a HUB."""
my_hub = SAMPHubServer()
my_hub.start()
yield
my_hub.stop() |
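A hedged sketch of how such a fixture is consumed (hypothetical test name; in the astropy test module the generator above is registered with ``@pytest.fixture``). Requesting ``samp_hub`` as an argument starts a Hub before the test runs and stops it afterwards:
def test_client_can_connect(samp_hub):
    """Hypothetical test relying on the ``samp_hub`` fixture above."""
    client = SAMPIntegratedClient()
    client.connect()
    try:
        assert client.is_connected
    finally:
        client.disconnect()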
Test that SAMP returns a warning if no receiver got the message. | def test_SAMPIntegratedClient_notify_all(samp_hub):
"""Test that SAMP returns a warning if no receiver got the message."""
client = SAMPIntegratedClient()
client.connect()
message = {"samp.mtype": "coverage.load.moc.fits"}
with pytest.warns(SAMPWarning):
client.notify_all(message)
client.disconnect() |
Test that SAMPIntegratedClient can reconnect.
This is a regression test for bug [#2673]
https://github.com/astropy/astropy/issues/2673 | def test_reconnect(samp_hub):
"""Test that SAMPIntegratedClient can reconnect.
This is a regression test for bug [#2673]
https://github.com/astropy/astropy/issues/2673
"""
my_client = SAMPIntegratedClient()
my_client.connect()
my_client.disconnect()
my_client.connect() |
Test that SAMPHubError can be instantiated | def test_SAMPHubError():
"""Test that SAMPHubError can be instantiated"""
SAMPHubError("test") |
Test that SAMPClientError can be instantiated | def test_SAMPClientError():
"""Test that SAMPClientError can be instantiated"""
SAMPClientError("test") |
Test that SAMPProxyError can be instantiated | def test_SAMPProxyError():
"""Test that SAMPProxyError can be instantiated"""
SAMPProxyError("test", "any") |
Test that SAMPHub can be instantiated | def test_SAMPHubServer():
"""Test that SAMPHub can be instantiated"""
SAMPHubServer(web_profile=False, mode="multiple", pool_size=1) |
Test that SAMPHub can be run | def test_SAMPHubServer_run():
"""Test that SAMPHub can be run"""
hub = SAMPHubServer(web_profile=False, mode="multiple", pool_size=1)
hub.start()
time.sleep(1)
hub.stop() |
Test that SAMPHub can be restarted after it has been stopped, including
when web profile support is enabled. | def test_SAMPHubServer_run_repeated():
"""
Test that SAMPHub can be restarted after it has been stopped, including
when web profile support is enabled.
"""
hub = SAMPHubServer(web_profile=True, mode="multiple", pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
time.sleep(1)
hub.start()
time.sleep(1)
hub.stop() |
Compute optimal segmentation of data with Scargle's Bayesian Blocks.
This is a flexible implementation of the Bayesian Blocks algorithm
described in Scargle 2013 [1]_.
Parameters
----------
t : array-like
data times (one dimensional, length N)
x : array-like, optional
data values
sigma : array-like or float, optional
data errors
fitness : str or object
the fitness function to use for the model.
If a string, the following options are supported:
- 'events' : binned or unbinned event data. Arguments are ``gamma``,
which gives the slope of the prior on the number of bins, or
``ncp_prior``, which is :math:`-\ln({\tt gamma})`.
- 'regular_events' : non-overlapping events measured at multiples of a
fundamental tick rate, ``dt``, which must be specified as an
additional argument. Extra arguments are ``p0``, which gives the
false alarm probability to compute the prior, or ``gamma``, which
gives the slope of the prior on the number of bins, or ``ncp_prior``,
which is :math:`-\ln({\tt gamma})`.
- 'measures' : fitness for a measured sequence with Gaussian errors.
Extra arguments are ``p0``, which gives the false alarm probability
to compute the prior, or ``gamma``, which gives the slope of the
prior on the number of bins, or ``ncp_prior``, which is
:math:`-\ln({\tt gamma})`.
In all three cases, if more than one of ``p0``, ``gamma``, and
``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma``
which takes precedence over ``p0``.
Alternatively, the fitness parameter can be an instance of
:class:`FitnessFunc` or a subclass thereof.
**kwargs :
any additional keyword arguments will be passed to the specified
:class:`FitnessFunc` derived class.
Returns
-------
edges : ndarray
array containing the (N+1) edges defining the N bins
Examples
--------
.. testsetup::
>>> np.random.seed(12345)
Event data:
>>> t = np.random.normal(size=100)
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Event data with repeats:
>>> t = np.random.normal(size=100)
>>> t[80:] = t[:20]
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Regular event data:
>>> dt = 0.05
>>> t = dt * np.arange(1000)
>>> x = np.zeros(len(t))
>>> x[np.random.randint(0, len(t), len(t) // 10)] = 1
>>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
Measured point data with errors:
>>> t = 100 * np.random.random(100)
>>> x = np.exp(-0.5 * (t - 50) ** 2)
>>> sigma = 0.1
>>> x_obs = np.random.normal(x, sigma)
>>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
References
----------
.. [1] Scargle, J et al. (2013)
https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S
.. [2] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic
Programming. Princeton University Press, Princeton.
https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming
.. [3] Bellman, R., Roth, R., 1969. Curve fitting by segmented
straight lines. J. Amer. Statist. Assoc. 64, 1079–1084.
https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038
See Also
--------
astropy.stats.histogram : compute a histogram using bayesian blocks | def bayesian_blocks(t, x=None, sigma=None, fitness="events", **kwargs):
r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks.
This is a flexible implementation of the Bayesian Blocks algorithm
described in Scargle 2013 [1]_.
Parameters
----------
t : array-like
data times (one dimensional, length N)
x : array-like, optional
data values
sigma : array-like or float, optional
data errors
fitness : str or object
the fitness function to use for the model.
If a string, the following options are supported:
- 'events' : binned or unbinned event data. Arguments are ``gamma``,
which gives the slope of the prior on the number of bins, or
``ncp_prior``, which is :math:`-\ln({\tt gamma})`.
- 'regular_events' : non-overlapping events measured at multiples of a
fundamental tick rate, ``dt``, which must be specified as an
additional argument. Extra arguments are ``p0``, which gives the
false alarm probability to compute the prior, or ``gamma``, which
gives the slope of the prior on the number of bins, or ``ncp_prior``,
which is :math:`-\ln({\tt gamma})`.
- 'measures' : fitness for a measured sequence with Gaussian errors.
Extra arguments are ``p0``, which gives the false alarm probability
to compute the prior, or ``gamma``, which gives the slope of the
prior on the number of bins, or ``ncp_prior``, which is
:math:`-\ln({\tt gamma})`.
In all three cases, if more than one of ``p0``, ``gamma``, and
``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma``
which takes precedence over ``p0``.
Alternatively, the fitness parameter can be an instance of
:class:`FitnessFunc` or a subclass thereof.
**kwargs :
any additional keyword arguments will be passed to the specified
:class:`FitnessFunc` derived class.
Returns
-------
edges : ndarray
array containing the (N+1) edges defining the N bins
Examples
--------
.. testsetup::
>>> np.random.seed(12345)
Event data:
>>> t = np.random.normal(size=100)
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Event data with repeats:
>>> t = np.random.normal(size=100)
>>> t[80:] = t[:20]
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Regular event data:
>>> dt = 0.05
>>> t = dt * np.arange(1000)
>>> x = np.zeros(len(t))
>>> x[np.random.randint(0, len(t), len(t) // 10)] = 1
>>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
Measured point data with errors:
>>> t = 100 * np.random.random(100)
>>> x = np.exp(-0.5 * (t - 50) ** 2)
>>> sigma = 0.1
>>> x_obs = np.random.normal(x, sigma)
>>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
References
----------
.. [1] Scargle, J et al. (2013)
https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S
.. [2] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic
Programming. Princeton University Press, Princeton.
https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming
.. [3] Bellman, R., Roth, R., 1969. Curve fitting by segmented
straight lines. J. Amer. Statist. Assoc. 64, 1079–1084.
https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038
See Also
--------
astropy.stats.histogram : compute a histogram using bayesian blocks
"""
FITNESS_DICT = {
"events": Events,
"regular_events": RegularEvents,
"measures": PointMeasures,
}
fitness = FITNESS_DICT.get(fitness, fitness)
if type(fitness) is type and issubclass(fitness, FitnessFunc):
fitfunc = fitness(**kwargs)
elif isinstance(fitness, FitnessFunc):
fitfunc = fitness
else:
raise ValueError("fitness parameter not understood")
return fitfunc.fit(t, x, sigma) |
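The dispatch at the end accepts a string key, a ``FitnessFunc`` subclass, or an instance. A short sketch of the instance form (assuming ``Events`` is re-exported by ``astropy.stats``, as the fitness classes are in recent versions):
>>> import numpy as np
>>> from astropy.stats import bayesian_blocks, Events
>>> rng = np.random.default_rng(0)
>>> t = rng.normal(size=100)
>>> edges_inst = bayesian_blocks(t, fitness=Events(p0=0.01))
>>> edges_str = bayesian_blocks(t, fitness='events', p0=0.01)
>>> bool(np.allclose(edges_inst, edges_str))
True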
Compute the biweight location.
The biweight location is a robust statistic for determining the
central location of a distribution. It is given by:
.. math::
\zeta_{biloc}= M + \frac{\sum_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
{\sum_{|u_i|<1} \ (1 - u_i^2)^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input initial location guess) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight location tuning constant ``c`` is typically 6.0 (the
default).
If :math:`MAD` is zero, then the median will be returned.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 6.0).
M : float or array-like, optional
Initial guess for the location. If ``M`` is a scalar value,
then its value will be used for the entire array (or along each
``axis``, if specified). If ``M`` is an array, then it must be
an array containing the initial location estimate along each
``axis`` of the input array. If `None` (default), then the
median of the input array will be used (or along each ``axis``,
if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight locations are
computed. If `None` (default), then the biweight location of
the flattened input array will be computed.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_location : float or `~numpy.ndarray`
The biweight location of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight location of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_location
>>> rand = np.random.default_rng(12345)
>>> biloc = biweight_location(rand.standard_normal(1000))
>>> print(biloc) # doctest: +FLOAT_CMP
0.01535330525461019 | def biweight_location(data, c=6.0, M=None, axis=None, *, ignore_nan=False):
r"""
Compute the biweight location.
The biweight location is a robust statistic for determining the
central location of a distribution. It is given by:
.. math::
\zeta_{biloc}= M + \frac{\sum_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
{\sum_{|u_i|<1} \ (1 - u_i^2)^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input initial location guess) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight location tuning constant ``c`` is typically 6.0 (the
default).
If :math:`MAD` is zero, then the median will be returned.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 6.0).
M : float or array-like, optional
Initial guess for the location. If ``M`` is a scalar value,
then its value will be used for the entire array (or along each
``axis``, if specified). If ``M`` is an array, then it must be
an array containing the initial location estimate along each
``axis`` of the input array. If `None` (default), then the
median of the input array will be used (or along each ``axis``,
if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight locations are
computed. If `None` (default), then the biweight location of
the flattened input array will be computed.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_location : float or `~numpy.ndarray`
The biweight location of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight location of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_location
>>> rand = np.random.default_rng(12345)
>>> biloc = biweight_location(rand.standard_normal(1000))
>>> print(biloc) # doctest: +FLOAT_CMP
0.01535330525461019
"""
median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)
if isinstance(data, np.ma.MaskedArray) and ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = median_func(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)
# mad = 0 means data is constant or mostly constant
# mad = np.nan means data contains NaNs and ignore_nan=False
if axis is None and (mad == 0.0 or np.isnan(mad)):
return M
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
with np.errstate(divide="ignore", invalid="ignore"):
u = d / (c * mad)
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) >= 1
u = (1 - u**2) ** 2
u[mask] = 0
# If mad == 0 along the specified ``axis`` in the input data, return
# the median value along that axis.
# Ignore RuntimeWarnings for divide by zero
with np.errstate(divide="ignore", invalid="ignore"):
value = M.squeeze() + (sum_func(d * u, axis=axis) / sum_func(u, axis=axis))
if np.isscalar(value):
return value
where_func = np.where
if isinstance(data, np.ma.MaskedArray):
where_func = np.ma.where # return MaskedArray
return where_func(mad.squeeze() == 0, M.squeeze(), value) |
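To make the formula concrete, a minimal 1D sketch (illustration only: no ``axis``, NaN, or masked-array handling, unlike the full implementation above):
import numpy as np
from astropy.stats import median_absolute_deviation

def biweight_location_1d(x, c=6.0):
    # Direct transcription of the biweight location formula for 1D data.
    x = np.asarray(x, dtype=float)
    M = np.median(x)
    mad = median_absolute_deviation(x)
    if mad == 0:
        return M  # degenerate case: fall back to the median
    u = (x - M) / (c * mad)
    w = (1 - u**2) ** 2
    w[np.abs(u) >= 1] = 0.0  # reject points with |u| >= 1
    return M + np.sum((x - M) * w) / np.sum(w)

For well-behaved 1D float input this should agree with ``biweight_location`` above.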
Compute the biweight scale.
The biweight scale is a robust statistic for determining the
standard deviation of a distribution. It is the square root of the
`biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
It is given by:
.. math::
\zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4}} {|(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))|}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of biweight scale, :math:`n` is the
total number of points in the array (or along the input ``axis``, if
specified). That definition is used if ``modify_sample_size`` is
`False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true standard deviation for
small sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight scales are computed.
If `None` (default), then the biweight scale of the flattened
input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
scale. If `True`, then the sample size is reduced to correct
for any rejected values (i.e. the sample size used includes only
the non-rejected values), which results in a value closer to the
true standard deviation for small sample sizes or for a large
number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_scale : float or `~numpy.ndarray`
The biweight scale of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight scale of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rand = np.random.default_rng(12345)
>>> biscl = biweight_scale(rand.standard_normal(1000))
>>> print(biscl) # doctest: +FLOAT_CMP
1.0239311812635818 | def biweight_scale(
data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False
):
r"""
Compute the biweight scale.
The biweight scale is a robust statistic for determining the
standard deviation of a distribution. It is the square root of the
`biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
It is given by:
.. math::
\zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4}} {|(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))|}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of biweight scale, :math:`n` is the
total number of points in the array (or along the input ``axis``, if
specified). That definition is used if ``modify_sample_size`` is
`False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true standard deviation for
small sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight scales are computed.
If `None` (default), then the biweight scale of the flattened
input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
scale. If `True`, then the sample size is reduced to correct
for any rejected values (i.e. the sample size used includes only
the non-rejected values), which results in a value closer to the
true standard deviation for small sample sizes or for a large
number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_scale : float or `~numpy.ndarray`
The biweight scale of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight scale of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rand = np.random.default_rng(12345)
>>> biscl = biweight_scale(rand.standard_normal(1000))
>>> print(biscl) # doctest: +FLOAT_CMP
1.0239311812635818
"""
return np.sqrt(
biweight_midvariance(
data,
c=c,
M=M,
axis=axis,
modify_sample_size=modify_sample_size,
ignore_nan=ignore_nan,
)
) |
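A short robustness illustration (the expected values are hedged as loose bounds rather than exact numbers):
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rng = np.random.default_rng(0)
>>> x = rng.normal(0, 1, 1000)
>>> x[0] = 1e6                           # one gross outlier
>>> bool(np.std(x) > 100)                # the classical estimate blows up
True
>>> bool(0.5 < biweight_scale(x) < 1.5)  # the biweight scale stays near 1
True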
Compute the biweight midvariance.
The biweight midvariance is a robust statistic for determining the
variance of a distribution. Its square root is a robust estimator
of scale (i.e. standard deviation). It is given by:
.. math::
\zeta_{bivar} = n \ \frac{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4} {(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of `biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
:math:`n` is the total number of points in the array (or along the
input ``axis``, if specified). That definition is used if
``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight midvariances are
computed. If `None` (default), then the biweight midvariance of
the flattened input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midvariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true variance for small sample sizes or for a
large number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_midvariance : float or `~numpy.ndarray`
The biweight midvariance of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
See Also
--------
biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance
.. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight midvariance of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance
>>> rand = np.random.default_rng(12345)
>>> bivar = biweight_midvariance(rand.standard_normal(1000))
>>> print(bivar) # doctest: +FLOAT_CMP
1.0484350639638342 | def biweight_midvariance(
data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False
):
r"""
Compute the biweight midvariance.
The biweight midvariance is a robust statistic for determining the
variance of a distribution. Its square root is a robust estimator
of scale (i.e. standard deviation). It is given by:
.. math::
\zeta_{bivar} = n \ \frac{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4} {(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of `biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
:math:`n` is the total number of points in the array (or along the
input ``axis``, if specified). That definition is used if
``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight midvariances are
computed. If `None` (default), then the biweight midvariance of
the flattened input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midvariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true variance for small sample sizes or for a
large number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_midvariance : float or `~numpy.ndarray`
The biweight midvariance of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
See Also
--------
biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance
.. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight midvariance of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance
>>> rand = np.random.default_rng(12345)
>>> bivar = biweight_midvariance(rand.standard_normal(1000))
>>> print(bivar) # doctest: +FLOAT_CMP
1.0484350639638342
"""
median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)
if isinstance(data, np.ma.MaskedArray) and ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = median_func(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)
if axis is None:
# data is constant or mostly constant OR
# data contains NaNs and ignore_nan=False
if mad == 0.0 or np.isnan(mad):
return mad**2 # variance units
else:
mad = np.expand_dims(mad, axis=axis)
with np.errstate(divide="ignore", invalid="ignore"):
u = d / (c * mad)
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) < 1
if isinstance(mask, np.ma.MaskedArray):
mask = mask.filled(fill_value=False) # exclude masked data values
u = u**2
if modify_sample_size:
n = sum_func(mask, axis=axis)
else:
# set good values to 1, bad values to 0
include_mask = np.ones(data.shape)
if isinstance(data, np.ma.MaskedArray):
include_mask[data.mask] = 0
if ignore_nan:
include_mask[np.isnan(data)] = 0
n = np.sum(include_mask, axis=axis)
f1 = d * d * (1.0 - u) ** 4
f1[~mask] = 0.0
f1 = sum_func(f1, axis=axis)
f2 = (1.0 - u) * (1.0 - 5.0 * u)
f2[~mask] = 0.0
f2 = np.abs(np.sum(f2, axis=axis)) ** 2
# If mad == 0 along the specified ``axis`` in the input data, return
# 0.0 along that axis.
# Ignore RuntimeWarnings for divide by zero.
with np.errstate(divide="ignore", invalid="ignore"):
value = n * f1 / f2
if np.isscalar(value):
return value
where_func = np.where
if isinstance(data, np.ma.MaskedArray):
where_func = np.ma.where # return MaskedArray
return where_func(mad.squeeze() == 0, 0.0, value) |
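A quick consistency check (this follows directly from ``biweight_scale`` above being defined as the square root of the biweight midvariance):
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance, biweight_scale
>>> rng = np.random.default_rng(12345)
>>> x = rng.standard_normal(1000)
>>> bool(np.isclose(biweight_scale(x) ** 2, biweight_midvariance(x)))
True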
Compute the biweight midcovariance between pairs of multiple
variables.
The biweight midcovariance is a robust and resistant estimator of
the covariance between two variables.
This function computes the biweight midcovariance between all pairs
of the input variables (rows) in the input data. The output array
will have a shape of (N_variables, N_variables). The diagonal
elements will be the biweight midvariances of each input variable
(see :func:`biweight_midvariance`). The off-diagonal elements will
be the biweight midcovariances between each pair of input variables.
For example, if the input array ``data`` contains three variables
(rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
midcovariance matrix will be:
.. math::
\begin{pmatrix}
\zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\
\zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\
\zeta_{zx} & \zeta_{zy} & \zeta_{zz}
\end{pmatrix}
where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
are the biweight midvariances of each variable. The biweight
midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
(:math:`= \zeta_{yx}`). The biweight midcovariance between
:math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
\zeta_{zx}`). The biweight midcovariance between :math:`y` and
:math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).
The biweight midcovariance between two variables :math:`x` and
:math:`y` is given by:
.. math::
\zeta_{xy} = n_{xy} \ \frac{\sum_{|u_i| < 1, \ |v_i| < 1} \
(x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
{(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
(\sum_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}
where :math:`M_x` and :math:`M_y` are the medians (or the input
locations) of the two variables and :math:`u_i` and :math:`v_i` are
given by:
.. math::
u_{i} = \frac{(x_i - M_x)}{c * MAD_x}
v_{i} = \frac{(y_i - M_y)}{c * MAD_y}
where :math:`c` is the biweight tuning constant and :math:`MAD_x`
and :math:`MAD_y` are the `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
:math:`x` and :math:`y` variables. The biweight midvariance tuning
constant ``c`` is typically 9.0 (the default).
If :math:`MAD_x` or :math:`MAD_y` are zero, then zero will be
returned for that element.
For the standard definition of biweight midcovariance,
:math:`n_{xy}` is the total number of observations of each variable.
That definition is used if ``modify_sample_size`` is `False`, which
is the default.
However, if ``modify_sample_size = True``, then :math:`n_{xy}` is the
number of observations for which :math:`|u_i| < 1` and/or :math:`|v_i|
< 1`, i.e.
.. math::
n_{xx} = \sum_{|u_i| < 1} \ 1
.. math::
n_{xy} = n_{yx} = \sum_{|u_i| < 1, \ |v_i| < 1} \ 1
.. math::
n_{yy} = \sum_{|v_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : 2D or 1D array-like
Input data either as a 2D or 1D array. For a 2D array, it
should have a shape (N_variables, N_observations). A 1D array
may be input for observations of a single variable, in which
case the biweight midvariance will be calculated (no
covariance). Each row of ``data`` represents a variable, and
each column a single observation of all those variables (same as
the `numpy.cov` convention).
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or 1D array-like, optional
The location estimate of each variable, either as a scalar or
array. If ``M`` is an array, then it must be a 1D array
containing the location estimate of each row (i.e. one element
per variable). If ``M`` is a scalar value, then its value will be
used for each variable (row). If `None` (default), then the
median of each variable (row) will be used.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of observations of each variable, which follows the
standard definition of biweight midcovariance. If `True`, then
the sample size is reduced to correct for any rejected values
(see formula above), which results in a value closer to the true
covariance for small sample sizes or for a large number of
rejected values.
Returns
-------
biweight_midcovariance : ndarray
A 2D array representing the biweight midcovariances between each
pair of the variables (rows) in the input array. The output
array will have a shape of (N_variables, N_variables). The
diagonal elements will be the biweight midvariances of each
input variable. The off-diagonal elements will be the biweight
midcovariances between each pair of input variables.
See Also
--------
biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location
References
----------
.. [1] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm
Examples
--------
Compute the biweight midcovariance between two random variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcovariance
>>> # Generate two random variables x and y
>>> rng = np.random.default_rng(1)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> # Calculate the biweight midcovariances between x and y
>>> bicov = biweight_midcovariance([x, y])
>>> print(bicov) # doctest: +FLOAT_CMP
[[0.83435568 0.02379316]
[0.02379316 7.15665769]]
>>> # Print standard deviation estimates
>>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP
[0.91343072 2.67519302] | def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcovariance between pairs of multiple
variables.
The biweight midcovariance is a robust and resistant estimator of
the covariance between two variables.
This function computes the biweight midcovariance between all pairs
of the input variables (rows) in the input data. The output array
will have a shape of (N_variables, N_variables). The diagonal
elements will be the biweight midvariances of each input variable
(see :func:`biweight_midvariance`). The off-diagonal elements will
be the biweight midcovariances between each pair of input variables.
For example, if the input array ``data`` contains three variables
(rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
midcovariance matrix will be:
.. math::
\begin{pmatrix}
\zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\
\zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\
\zeta_{zx} & \zeta_{zy} & \zeta_{zz}
\end{pmatrix}
where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
are the biweight midvariances of each variable. The biweight
midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
(:math:`= \zeta_{yx}`). The biweight midcovariance between
:math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
\zeta_{zx}`). The biweight midcovariance between :math:`y` and
:math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).
The biweight midcovariance between two variables :math:`x` and
:math:`y` is given by:
.. math::
\zeta_{xy} = n_{xy} \ \frac{\sum_{|u_i| < 1, \ |v_i| < 1} \
(x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
{(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
(\sum_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}
where :math:`M_x` and :math:`M_y` are the medians (or the input
locations) of the two variables and :math:`u_i` and :math:`v_i` are
given by:
.. math::
u_{i} = \frac{(x_i - M_x)}{c * MAD_x}
v_{i} = \frac{(y_i - M_y)}{c * MAD_y}
where :math:`c` is the biweight tuning constant and :math:`MAD_x`
and :math:`MAD_y` are the `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
:math:`x` and :math:`y` variables. The biweight midvariance tuning
constant ``c`` is typically 9.0 (the default).
If :math:`MAD_x` or :math:`MAD_y` are zero, then zero will be
returned for that element.
For the standard definition of biweight midcovariance,
:math:`n_{xy}` is the total number of observations of each variable.
That definition is used if ``modify_sample_size`` is `False`, which
is the default.
However, if ``modify_sample_size = True``, then :math:`n_{xy}` is the
number of observations for which :math:`|u_i| < 1` and/or :math:`|v_i|
< 1`, i.e.
.. math::
n_{xx} = \sum_{|u_i| < 1} \ 1
.. math::
n_{xy} = n_{yx} = \sum_{|u_i| < 1, \ |v_i| < 1} \ 1
.. math::
n_{yy} = \sum_{|v_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : 2D or 1D array-like
Input data either as a 2D or 1D array. For a 2D array, it
should have a shape (N_variables, N_observations). A 1D array
may be input for observations of a single variable, in which
case the biweight midvariance will be calculated (no
covariance). Each row of ``data`` represents a variable, and
each column a single observation of all those variables (same as
the `numpy.cov` convention).
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or 1D array-like, optional
The location estimate of each variable, either as a scalar or
array. If ``M`` is an array, then it must be a 1D array
containing the location estimate of each row (i.e. one element
per variable). If ``M`` is a scalar value, then its value will be
used for each variable (row). If `None` (default), then the
median of each variable (row) will be used.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of observations of each variable, which follows the
standard definition of biweight midcovariance. If `True`, then
the sample size is reduced to correct for any rejected values
(see formula above), which results in a value closer to the true
covariance for small sample sizes or for a large number of
rejected values.
Returns
-------
biweight_midcovariance : ndarray
A 2D array representing the biweight midcovariances between each
pair of the variables (rows) in the input array. The output
array will have a shape of (N_variables, N_variables). The
diagonal elements will be the biweight midvariances of each
input variable. The off-diagonal elements will be the biweight
midcovariances between each pair of input variables.
See Also
--------
biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location
References
----------
.. [1] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm
Examples
--------
Compute the biweight midcovariance between two random variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcovariance
>>> # Generate two random variables x and y
>>> rng = np.random.default_rng(1)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> # Calculate the biweight midcovariances between x and y
>>> bicov = biweight_midcovariance([x, y])
>>> print(bicov) # doctest: +FLOAT_CMP
[[0.83435568 0.02379316]
[0.02379316 7.15665769]]
>>> # Print standard deviation estimates
>>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP
[0.91343072 2.67519302]
"""
data = np.asanyarray(data).astype(np.float64)
# ensure data is 2D
if data.ndim == 1:
data = data[np.newaxis, :]
if data.ndim != 2:
raise ValueError("The input array must be 2D or 1D.")
# estimate location if not given
if M is None:
M = np.median(data, axis=1)
M = np.asanyarray(M)
if M.ndim > 1:
raise ValueError("M must be a scalar or 1D array.")
# set up the differences
d = (data.T - M).T
# set up the weighting
mad = median_absolute_deviation(data, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
u = (d.T / (c * mad)).T
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) < 1
u = u**2
if modify_sample_size:
maskf = mask.astype(float)
n = np.inner(maskf, maskf)
else:
n = data[0].size
usub1 = 1.0 - u
usub5 = 1.0 - 5.0 * u
usub1[~mask] = 0.0
with np.errstate(divide="ignore", invalid="ignore"):
numerator = d * usub1**2
denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis]
numerator_matrix = np.dot(numerator, numerator.T)
denominator_matrix = np.dot(denominator, denominator.T)
value = n * (numerator_matrix / denominator_matrix)
idx = np.where(mad == 0)[0]
value[idx, :] = 0
value[:, idx] = 0
return value |
Compute the biweight midcorrelation between two variables.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
measure of similarity between samples. It is given by:
.. math::
r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}
where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`,
:math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and
:math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and
:math:`y`.
Parameters
----------
x, y : 1D array-like
Input arrays for the two variables. ``x`` and ``y`` must be 1D
arrays and have the same number of elements.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0). See
`biweight_midcovariance` for more details.
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified). See
`biweight_midcovariance` for more details.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midcovariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true midcovariance for small sample sizes or for a
large number of rejected values. See `biweight_midcovariance`
for more details.
Returns
-------
biweight_midcorrelation : float
The biweight midcorrelation between ``x`` and ``y``.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location
References
----------
.. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation
Examples
--------
Calculate the biweight midcorrelation between two variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcorrelation
>>> rng = np.random.default_rng(12345)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> bicorr = biweight_midcorrelation(x, y)
>>> print(bicorr) # doctest: +FLOAT_CMP
-0.09203238319481295 | def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcorrelation between two variables.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
measure of similarity between samples. It is given by:
.. math::
r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}
where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`,
:math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and
:math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and
:math:`y`.
Parameters
----------
x, y : 1D array-like
Input arrays for the two variables. ``x`` and ``y`` must be 1D
arrays and have the same number of elements.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0). See
`biweight_midcovariance` for more details.
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified). See
`biweight_midcovariance` for more details.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midcovariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true midcovariance for small sample sizes or for a
large number of rejected values. See `biweight_midcovariance`
for more details.
Returns
-------
biweight_midcorrelation : float
The biweight midcorrelation between ``x`` and ``y``.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location
References
----------
.. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation
Examples
--------
Calculate the biweight midcorrelation between two variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcorrelation
>>> rng = np.random.default_rng(12345)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> bicorr = biweight_midcorrelation(x, y)
>>> print(bicorr) # doctest: +FLOAT_CMP
-0.09203238319481295
"""
x = np.asanyarray(x)
y = np.asanyarray(y)
if x.ndim != 1:
raise ValueError("x must be a 1D array.")
if y.ndim != 1:
raise ValueError("y must be a 1D array.")
if x.shape != y.shape:
raise ValueError("x and y must have the same shape.")
bicorr = biweight_midcovariance(
[x, y], c=c, M=M, modify_sample_size=modify_sample_size
)
return bicorr[0, 1] / (np.sqrt(bicorr[0, 0] * bicorr[1, 1])) |
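A short consistency check (editorial sketch, not part of the astropy source): the midcorrelation is, by definition, the midcovariance normalized by the two midvariances, so recomputing it from biweight_midcovariance reproduces the same value.
import numpy as np
from astropy.stats import biweight_midcorrelation, biweight_midcovariance
rng = np.random.default_rng(12345)
x = rng.normal(0, 1, 200)
y = rng.normal(0, 3, 200)
x[0] = 30.0
cov = biweight_midcovariance([x, y])
# zeta_xy / sqrt(zeta_xx * zeta_yy), built directly from the covariance matrix.
manual = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
assert np.isclose(biweight_midcorrelation(x, y), manual)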
Computes the circular mean angle of an array of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : ndarray or `~astropy.units.Quantity`
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> | def circmean(data, axis=None, weights=None):
"""Computes the circular mean angle of an array of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : ndarray or `~astropy.units.Quantity`
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
return _angle(data, 1, 0.0, axis, weights) |
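A minimal sketch (editorial addition, not from the astropy source) showing that a plain ndarray is treated as radians, so converting degrees by hand gives the same mean angle as passing a degree Quantity.
import numpy as np
import astropy.units as u
from astropy.stats import circmean
angles_deg = np.array([51, 67, 40, 109, 31, 358])
mean_rad = circmean(np.radians(angles_deg))   # plain ndarray -> radians assumed
mean_q = circmean(angles_deg * u.deg)         # Quantity keeps its unit
assert np.isclose(np.degrees(mean_rad), mean_q.to_value(u.deg))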
Computes the circular variance of an array of circular data.
There are some concepts for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
Dimensionless, if Quantity.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
For Scipy < 1.9.0, ``scipy.stats.circvar`` uses a different
definition based on an approximation using the limit of small
angles that approaches the linear variance. For Scipy >= 1.9.0,
``scipy.stats.circvar`` uses a definition consistent with this
implementation. | def circvar(data, axis=None, weights=None):
"""Computes the circular variance of an array of circular data.
There are some concepts for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
Dimensionless, if Quantity.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
For Scipy < 1.9.0, ``scipy.stats.circvar`` uses a different
definition based on an approximation using the limit of small
angles that approaches the linear variance. For Scipy >= 1.9.0,
``scipy.stats.cirvar`` uses a definition consistent with this
implementation.
"""
return 1.0 - _length(data, 1, 0.0, axis, weights) |
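An illustrative sketch (editorial addition, not part of the astropy source): for unweighted data in radians the circular variance is one minus the length R of the mean resultant vector, which can be checked directly.
import numpy as np
from astropy.stats import circvar
theta = np.radians([51, 67, 40, 109, 31, 358])
# R is the length of the mean resultant vector of the unit phasors.
R = np.hypot(np.mean(np.cos(theta)), np.mean(np.sin(theta)))
assert np.isclose(circvar(theta), 1.0 - R)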
Computes the circular standard deviation of an array of circular data.
The standard deviation implemented here is based on the definitions given
by [1]_, which is also the same as that used by the R package 'CircStats' [2]_.
Two methods are implemented: 'angular' and 'circular'. The former is
defined as sqrt(2 * (1 - R)) and it is bounded in [0, sqrt(2)]. The
latter is defined as sqrt(-2 * ln(R)) and it is bounded in [0, inf].
Following 'CircStat' the default method used to obtain the standard
deviation is 'angular'.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
If quantity, must be dimensionless.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [3]_, remark 1.4, page 22,
for detailed explanation.
method : str, optional
The method used to estimate the standard deviation:
- 'angular' : obtains the angular deviation
- 'circular' : obtains the circular deviation
Returns
-------
circstd : ndarray or `~astropy.units.Quantity` ['dimensionless']
Angular or circular standard deviation.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data) # doctest: +FLOAT_CMP
<Quantity 0.57195022>
Alternatively, using the 'circular' method:
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data, method='circular') # doctest: +FLOAT_CMP
<Quantity 0.59766999>
References
----------
.. [1] P. Berens. "CircStat: A MATLAB Toolbox for Circular Statistics".
Journal of Statistical Software, vol 31, issue 10, 2009.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001. | def circstd(data, axis=None, weights=None, method="angular"):
"""Computes the circular standard deviation of an array of circular data.
The standard deviation implemented here is based on the definitions given
by [1]_, which is also the same as that used by the R package 'CircStats' [2]_.
Two methods are implemented: 'angular' and 'circular'. The former is
defined as sqrt(2 * (1 - R)) and it is bounded in [0, sqrt(2)]. The
latter is defined as sqrt(-2 * ln(R)) and it is bounded in [0, inf].
Following 'CircStat' the default method used to obtain the standard
deviation is 'angular'.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
If quantity, must be dimensionless.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [3]_, remark 1.4, page 22,
for detailed explanation.
method : str, optional
The method used to estimate the standard deviation:
- 'angular' : obtains the angular deviation
- 'circular' : obtains the circular deviation
Returns
-------
circstd : ndarray or `~astropy.units.Quantity` ['dimensionless']
Angular or circular standard deviation.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data) # doctest: +FLOAT_CMP
<Quantity 0.57195022>
Alternatively, using the 'circular' method:
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data, method='circular') # doctest: +FLOAT_CMP
<Quantity 0.59766999>
References
----------
.. [1] P. Berens. "CircStat: A MATLAB Toolbox for Circular Statistics".
Journal of Statistical Software, vol 31, issue 10, 2009.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
"""
if method not in ("angular", "circular"):
raise ValueError("method should be either 'angular' or 'circular'")
if method == "angular":
return np.sqrt(2.0 * (1.0 - _length(data, 1, 0.0, axis, weights)))
else:
return np.sqrt(-2.0 * np.log(_length(data, 1, 0.0, axis, weights))) |
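A small sketch (editorial addition, not part of the astropy source) relating the two methods to the mean resultant length R of the sample.
import numpy as np
from astropy.stats import circstd
theta = np.radians([51, 67, 40, 109, 31, 358])
R = np.hypot(np.mean(np.cos(theta)), np.mean(np.sin(theta)))
# 'angular' deviation: sqrt(2 * (1 - R)); 'circular' deviation: sqrt(-2 * ln(R)).
assert np.isclose(circstd(theta), np.sqrt(2.0 * (1.0 - R)))
assert np.isclose(circstd(theta, method="circular"), np.sqrt(-2.0 * np.log(R)))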
Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : bool, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : ndarray or `~astropy.units.Quantity`
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> | def circmoment(data, p=1.0, centered=False, axis=None, weights=None):
"""Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : bool, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : ndarray or `~astropy.units.Quantity`
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if centered:
phi = circmean(data, axis, weights)
else:
phi = 0.0
return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis, weights) |
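A quick check (editorial sketch, not part of the astropy source): for p = 1 and ``centered=False`` the moment direction is the circular mean and the moment length is 1 minus the circular variance.
import numpy as np
from astropy.stats import circmean, circmoment, circvar
theta = np.radians([51, 67, 40, 109, 31, 358])
direction, length = circmoment(theta, p=1)
assert np.isclose(direction, circmean(theta))
assert np.isclose(length, 1.0 - circvar(theta))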
Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> | def circcorrcoef(alpha, beta, axis=None, weights_alpha=None, weights_beta=None):
"""Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if np.size(alpha, axis) != np.size(beta, axis):
raise ValueError("alpha and beta must be arrays of the same size")
mu_a = circmean(alpha, axis, weights_alpha)
mu_b = circmean(beta, axis, weights_beta)
sin_a = np.sin(alpha - mu_a)
sin_b = np.sin(beta - mu_b)
rho = np.sum(sin_a * sin_b, axis) / np.sqrt(np.sum(sin_a * sin_a, axis) * np.sum(sin_b * sin_b, axis))
return rho |
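A consistency sketch (editorial addition, not part of the astropy source) recomputing the coefficient from its defining formula on the same data as the doctest, expressed in radians.
import numpy as np
from astropy.stats import circcorrcoef, circmean
alpha = np.radians([356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324,
                    85, 324, 340, 157, 238, 254, 146, 232, 122, 329])
beta = np.radians([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
                   45, 47, 108, 221, 270, 119, 248, 270, 45, 23])
sa = np.sin(alpha - circmean(alpha))
sb = np.sin(beta - circmean(beta))
manual = np.sum(sa * sb) / np.sqrt(np.sum(sa * sa) * np.sum(sb * sb))
assert np.isclose(circcorrcoef(alpha, beta), manual)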
Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
designed to detect a unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
Small p-values suggest rejecting the null hypothesis.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://wexler.free.fr/library/files/wilkie%20(1983)%20rayleigh%20test%20for%20randomness%20of%20circular%20data.pdf> | def rayleightest(data, axis=None, weights=None):
"""Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
designed to detect a unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
Small p-values suggest rejecting the null hypothesis.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://wexler.free.fr/library/files/wilkie%20(1983)%20rayleigh%20test%20for%20randomness%20of%20circular%20data.pdf>
"""
n = np.size(data, axis=axis)
Rbar = _length(data, 1, 0.0, axis, weights)
z = n * Rbar * Rbar
# see [3] and [4] for the formulae below
tmp = 1.0
if n < 50:
tmp = (
1.0
+ (2.0 * z - z * z) / (4.0 * n)
- (24.0 * z - 132.0 * z**2.0 + 76.0 * z**3.0 - 9.0 * z**4.0)
/ (288.0 * n * n)
)
p_value = np.exp(-z) * tmp
return p_value |
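An illustrative sketch (editorial addition, not part of the astropy source): a strongly unimodal sample should yield a tiny p-value while a uniform sample typically does not; the seed and sample sizes are arbitrary and the outputs are only printed, not asserted.
import numpy as np
from astropy.stats import rayleightest
rng = np.random.default_rng(0)
uniform = rng.uniform(0.0, 2.0 * np.pi, 500)            # no preferred direction
clustered = rng.vonmises(mu=1.0, kappa=4.0, size=500)   # strongly unimodal
print(rayleightest(clustered), rayleightest(uniform))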
Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or `~astropy.units.Quantity` ['angle'], optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007. | def vtest(data, mu=0.0, axis=None, weights=None):
"""Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or `~astropy.units.Quantity` ['angle'], optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
"""
from scipy.stats import norm
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError("Weights and data have inconsistent shape.")
n = np.size(data, axis=axis)
R0bar = np.sum(weights * np.cos(data - mu), axis) / np.sum(weights, axis)
z = np.sqrt(2.0 * n) * R0bar
pz = norm.cdf(z)
fz = norm.pdf(z)
# see reference [3]
p_value = (
1
- pz
+ fz
* (
(3 * z - z**3) / (16.0 * n)
+ (15 * z + 305 * z**3 - 125 * z**5 + 9 * z**7) / (4608.0 * n * n)
)
)
return p_value |
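A small sketch (editorial addition, not part of the astropy source): testing against the true mean direction should give a much smaller p-value than testing against the opposite direction; the parameters are arbitrary and nothing is asserted.
import numpy as np
from astropy.stats import vtest
rng = np.random.default_rng(1)
data = rng.vonmises(mu=0.5, kappa=3.0, size=200)
print(vtest(data, mu=0.5), vtest(data, mu=0.5 + np.pi))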
Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the mle will be computed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
mu : float or `~astropy.units.Quantity`
The mean (aka location parameter).
kappa : float or `~astropy.units.Quantity` ['dimensionless']
The concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> | def vonmisesmle(data, axis=None, weights=None):
"""Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the mle will be computed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
mu : float or `~astropy.units.Quantity`
The mean (aka location parameter).
kappa : float or `~astropy.units.Quantity` ['dimensionless']
The concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
mu = circmean(data, axis=axis, weights=weights)
kappa = _A1inv(_length(data, p=1, phi=0.0, axis=axis, weights=weights))
return mu, kappa |
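A brief sketch (editorial addition, not part of the astropy source) recovering the parameters of a synthetic von Mises sample; with a large sample the estimates should land near the true (mu, kappa) = (1.0, 2.5), but no exact values are asserted.
import numpy as np
from astropy.stats import vonmisesmle
rng = np.random.default_rng(42)
data = rng.vonmises(mu=1.0, kappa=2.5, size=2000)
mu_hat, kappa_hat = vonmisesmle(data)
print(mu_hat, kappa_hat)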
Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n))^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
cases, particularly when n is very large and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
latter condition is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]]) | def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n))^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
cases, particularly when n is very large and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
latter condition is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
"""
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == "wilson":
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval |
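A short sketch (editorial addition, not part of the astropy source) turning the returned limits into asymmetric error bars around the sample proportion k / n; the array values are arbitrary.
import numpy as np
from astropy.stats import binom_conf_interval
k = np.array([1, 4, 9])
n = np.array([10, 10, 10])
p = k / n
lo, hi = binom_conf_interval(k, n, interval="wilson")  # shape (2, len(k))
perr = np.array([p - lo, hi - p])  # lower and upper error bars on p
assert np.all(lo <= p) and np.all(p <= hi)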
Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show() | def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr |
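Beyond the plotting examples above, a minimal non-plotting sketch (illustrative values only; the variable names here are hypothetical, and the 'jeffreys' interval with a 95% confidence level requires scipy):

import numpy as np
from astropy.stats import binned_binom_proportion

rng = np.random.default_rng(0)
x = rng.uniform(20.0, 30.0, 500)       # a continuous covariate
success = rng.random(500) < 0.3        # boolean "detected" flags
ctr, hw, p, perr = binned_binom_proportion(
    x, success, bins=10, confidence_level=0.95, interval="jeffreys"
)
# perr has shape (2, len(p)): lower and upper uncertainties on p per bin.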
Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.cfa.harvard.edu/sherpa/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <https://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
This code is very intense numerically, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]]) | def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.cfa.harvard.edu/sherpa/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <https://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
This code is very intense numerically, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval |
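As a small illustration of the note that the 'sherpagehrels' lower limits can be badly wrong while its upper limits stay close to the frequentist ones, a comparison sketch (assumes scipy for the 'frequentist-confidence' mode; no specific output values are asserted):

import numpy as np
from astropy.stats import poisson_conf_interval

n = np.arange(5)
lo_g, hi_g = poisson_conf_interval(n, interval="sherpagehrels")
lo_f, hi_f = poisson_conf_interval(n, interval="frequentist-confidence")
# The sherpagehrels lower limit n - 1 - sqrt(n + 0.75) goes negative for
# small n, unlike the frequentist-confidence lower limit.
print(np.column_stack([n, lo_g, lo_f]))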
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std | def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 for why np.ma.median
# should not be used for normal arrays (summary: np.ma.median always
# returns a masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result |
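A short sketch of the ``axis`` and ``ignore_nan`` options described above (hypothetical data; no particular output is implied):

import numpy as np
from astropy.stats import median_absolute_deviation

data = np.random.default_rng(1).normal(size=(5, 100))
data[0, 0] = np.nan                       # inject a NaN
mad_rows = median_absolute_deviation(data, axis=1, ignore_nan=True)
# mad_rows has shape (5,): one MAD per row, with the NaN ignored.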
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation | def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602 |
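The constant 1.482602218505602 used above is simply the reciprocal of the normal quantile at probability 3/4, as noted in the code comment; a quick check (assumes scipy):

import numpy as np
from scipy.stats import norm

k = 1.0 / norm.ppf(0.75)
print(k)                                   # 1.482602218505602...
print(np.isclose(k, 1.482602218505602))    # True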
Computes the signal to noise ratio for a source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs | def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise |
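A usage sketch with made-up detector numbers (all values are illustrative, not taken from any real instrument); in the background-dominated regime the result scales roughly as the square root of the exposure time:

import numpy as np
from astropy.stats import signal_to_noise_oir_ccd

t = np.array([10.0, 100.0, 1000.0])        # exposure times in seconds
snr = signal_to_noise_oir_ccd(
    t, source_eps=1.0, sky_eps=20.0, dark_eps=0.01, rd=5.0, npix=25
)
print(snr)
print(snr / np.sqrt(t))   # roughly constant once read noise is negligible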
Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,) | def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot |
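The docstring mentions using the resamples to find confidence intervals; one common way to do that is a percentile bootstrap on the resampled statistic (a sketch, assuming the percentile method is adequate for the statistic of interest):

import numpy as np
from astropy.stats import bootstrap

data = np.random.default_rng(42).normal(loc=5.0, scale=2.0, size=200)
boot_means = bootstrap(data, bootnum=1000, bootfunc=np.mean)
lower, upper = np.percentile(boot_means, [16, 84])
# (lower, upper) is an approximate 68% percentile-bootstrap interval
# for the mean of the underlying distribution.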
Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <https://mpmath.org/>`_ library. | def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <https://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls, so we can calculate the
# result once and reuse it. The same is true for the factorial of N.
# eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (about a factor of 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func, then
calculate the matching S_min that satisfies
eqn7(S_min) = eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max |
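As a consistency check, the 'kraft-burrows-nousek' doctest in ``poisson_conf_interval`` above implies this helper should reproduce roughly (3.479, 16.113) for N=10, B=1.5, CL=0.95; a sketch, assuming it is run inside this module (the function is private):

# Cross-check against the kraft-burrows-nousek doctest shown earlier.
s_min, s_max = _scipy_kraft_burrows_nousek(N=10, B=1.5, CL=0.95)
print(s_min, s_max)    # expected to be close to 3.479 and 16.113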
Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <https://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100. | def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <https://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
# We convert these values to float because, for some reason,
# mpmath.mpf cannot convert from numpy.int64.
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func, then
calculate the matching S_min that satisfies
eqn7(S_min) = eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" value, is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found to prevent the solvers from finding
# the roots unless the starting values are very close to the final values.
# Thus, this primitive, time-wasting, brute-force stepping is used here to
# get an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max) |
Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: Either :mod:`scipy` or `mpmath
<https://mpmath.org/>`_ need to be available. (Scipy only works for
N < 100). | def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: Either :mod:`scipy` or `mpmath
<https://mpmath.org/>`_ need to be available. (Scipy only works for
N < 100).
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError("Either scipy or mpmath are required.") |
Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965. | def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)).sum()
return S1 - 8 * D / 3 * S2 |
Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122. | def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N) |
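A usage sketch with the default uniform CDF: uniformly distributed data should usually give a large ``fpp``, while strongly non-uniform data gives a very small one (only the qualitative behaviour is implied):

import numpy as np
from astropy.stats import kuiper

rng = np.random.default_rng(3)
d_u, fpp_u = kuiper(rng.random(500))          # consistent with uniform
d_s, fpp_s = kuiper(rng.random(500) ** 3)     # strongly non-uniform
print(fpp_u, fpp_s)   # the second is expected to be much smaller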
Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples. | def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.result_type(data1.dtype, data2.dtype)
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne) |
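A two-sample sketch along the same lines (again, only the qualitative behaviour is implied):

import numpy as np
from astropy.stats import kuiper_two

rng = np.random.default_rng(7)
a = rng.normal(0.0, 1.0, 400)
b = rng.normal(0.0, 1.0, 400)    # same distribution as a
c = rng.normal(0.5, 1.0, 400)    # shifted mean
print(kuiper_two(a, b))          # fpp typically not small
print(kuiper_two(a, c))          # fpp expected to be much smaller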
Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the sum, over the input intervals, of the number
of times the interval (breaks[i], breaks[i+1]) is covered by that
input interval, multiplied by that interval's weight.
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the sum, over the input intervals, of the number
of times the interval (breaks[i], breaks[i+1]) is covered by that
input interval, multiplied by that interval's weight.
"""
r = []
breaks = set()
tot = 0
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals |
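A worked sketch: a single observing block covering phases 0.2 to 1.4 (1.2 periods) wraps around, so phases 0.2-0.4 are covered twice (assuming the function is exported from ``astropy.stats`` as in current astropy):

import numpy as np
from astropy.stats import fold_intervals

breaks, weights = fold_intervals([(0.2, 1.4, 1.0)])
print(breaks)    # [0.  0.2 0.4 1. ]  (up to floating-point rounding)
print(weights)   # [1. 2. 1.]  -- phases 0.2-0.4 are observed twice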
Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by breaks, weights | def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by breaks, weights
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1) |
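Continuing the sketch above, the folded intervals can be turned into a callable CDF; half of the total exposure lies below phase 0.5, so the CDF evaluates to 0.5 there.
import numpy as np

breaks = np.array([0.0, 0.25, 0.75, 1.0])
weights = np.array([0.0, 2.0, 0.0])
cdf = cdf_from_intervals(breaks, weights)
cdf(0.25)  # -> 0.0, no exposure below phase 0.25
cdf(0.5)   # -> 0.5
cdf(1.0)   # -> 1.0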
Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals. | def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0 |
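Two quick checks of this helper (illustrative only): one overlapping and one disjoint pair of intervals.
interval_overlap_length((0.0, 0.5), (0.25, 1.0))  # -> 0.25, shared segment (0.25, 0.5)
interval_overlap_length((0.0, 0.2), (0.5, 1.0))   # -> 0.0, disjoint intervals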
Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin | def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h |
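Using the breaks/totals pair from the ``fold_intervals`` sketch, two bins each pick up an average weight of 1, since the weight-2 exposure covers half of each bin (again an illustrative sketch).
import numpy as np

breaks = np.array([0.0, 0.25, 0.75, 1.0])
totals = np.array([0.0, 2.0, 0.0])
histogram_intervals(2, breaks, totals)  # -> array([1., 1.])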
Calculate histogram bin edges like ``numpy.histogram_bin_edges``.
Parameters
----------
a : array-like
Input data. The bin edges are calculated over the flattened array.
bins : int, list, or str, optional
If ``bins`` is an int, it is the number of bins. If it is a list
it is taken to be the bin edges. If it is a string, it must be one
of 'blocks', 'knuth', 'scott' or 'freedman'. See
`~astropy.stats.histogram` for a description of each method.
range : tuple or None, optional
The minimum and maximum range for the histogram. If not specified,
it will be (a.min(), a.max()). However, if bins is a list it is
returned unmodified regardless of the range argument.
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges, though they may be used in the future as new methods are added. | def calculate_bin_edges(a, bins=10, range=None, weights=None):
"""
Calculate histogram bin edges like ``numpy.histogram_bin_edges``.
Parameters
----------
a : array-like
Input data. The bin edges are calculated over the flattened array.
bins : int, list, or str, optional
If ``bins`` is an int, it is the number of bins. If it is a list
it is taken to be the bin edges. If it is a string, it must be one
of 'blocks', 'knuth', 'scott' or 'freedman'. See
`~astropy.stats.histogram` for a description of each method.
range : tuple or None, optional
The minimum and maximum range for the histogram. If not specified,
it will be (a.min(), a.max()). However, if bins is a list it is
returned unmodified regardless of the range argument.
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges, though they may be used in the future as new methods are added.
"""
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError(
"weights are not yet supported for the enhanced histogram"
)
if bins == "blocks":
bins = bayesian_blocks(a)
elif bins == "knuth":
da, bins = knuth_bin_width(a, True)
elif bins == "scott":
da, bins = scott_bin_width(a, True)
elif bins == "freedman":
da, bins = freedman_bin_width(a, True)
else:
raise ValueError(f"unrecognized bin code: '{bins}'")
if range:
# Check that the upper and lower edges are what was requested.
# The current implementation of the bin width estimators does not
# guarantee this, it only ensures that data outside the range is
# excluded from calculation of the bin widths.
if bins[0] != range[0]:
bins[0] = range[0]
if bins[-1] != range[1]:
bins[-1] = range[1]
elif np.ndim(bins) == 0:
# Number of bins was given
bins = np.histogram_bin_edges(a, bins, range=range, weights=weights)
return bins |
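A brief usage sketch; it assumes ``calculate_bin_edges`` is importable from ``astropy.stats``, alongside ``histogram``.
import numpy as np
from astropy.stats import calculate_bin_edges

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
edges_fd = calculate_bin_edges(x, bins="freedman")          # rule-based edges
edges_10 = calculate_bin_edges(x, bins=10, range=(-3, 3))   # plain numpy behaviour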
Enhanced histogram function, providing adaptive binnings.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as `numpy.histogram`.
Parameters
----------
a : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
**kwargs : dict, optional
Extra arguments are described in `numpy.histogram`.
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram | def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as `numpy.histogram`.
Parameters
----------
a : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
**kwargs : dict, optional
Extra arguments are described in `numpy.histogram`.
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights)
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs) |
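A short sketch of the enhanced histogram with a rule-based binning (assuming the public ``astropy.stats.histogram``); the 'knuth' and 'blocks' options additionally require scipy.
import numpy as np
from astropy.stats import histogram

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
hist, edges = histogram(x, bins="scott", density=True)
# len(edges) == len(hist) + 1, and hist integrates to ~1 because density=True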
Return the optimal histogram bin width using Scott's rule.
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram | def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule.
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx |
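For roughly standard-normal data with n = 1000, Scott's rule gives a width near 3.5 * 1 / 10 = 0.35 (a sketch, assuming ``astropy.stats.scott_bin_width``).
import numpy as np
from astropy.stats import scott_bin_width

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
dx, bins = scott_bin_width(x, return_bins=True)
# dx is close to 3.5 * x.std() / 1000**(1/3) ~ 0.35 for this sample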
Return the optimal histogram bin width using the Freedman-Diaconis rule.
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
where :math:`q_{N}` is the :math:`N` percent quartile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram | def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule.
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
where :math:`q_{N}` is the :math:`N` percent quartile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
try:
bins = dmin + dx * np.arange(Nbins + 1)
except ValueError as e:
if "Maximum allowed size exceeded" in str(e):
raise ValueError(
"The inter-quartile range of the data is too small: "
f"failed to construct histogram with {Nbins + 1} bins. "
"Please use another bin method, such as "
'bins="scott"'
)
else: # Something else # pragma: no cover
raise
return dx, bins
else:
return dx |
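Because it is based on the interquartile range, the Freedman-Diaconis width is barely affected by a couple of gross outliers, unlike a standard-deviation-based rule (illustrative sketch, assuming ``astropy.stats.freedman_bin_width``).
import numpy as np
from astropy.stats import freedman_bin_width

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
x_out = np.concatenate([x, [50.0, -50.0]])  # add two gross outliers
dx_clean = freedman_bin_width(x)
dx_outliers = freedman_bin_width(x_out)
# dx_clean and dx_outliers are nearly identical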
Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
quiet : bool, optional
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram | def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
quiet : bool, optional
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx |
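A minimal sketch of Knuth's rule (assuming ``astropy.stats.knuth_bin_width``); note that the optimization step requires scipy.
import numpy as np
from astropy.stats import knuth_bin_width

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
dx, bins = knuth_bin_width(x, return_bins=True)
# bins span the data range with the optimized spacing dx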
Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
When comparing two models define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
the lower BIC. The higher :math:`\Delta \mathrm{BIC}` is, the stronger the
evidence against the model with the higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
In addition, assume that the t model yielded a higher likelihood.
The question that the BIC is meant to answer is: "Is the increase in
likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
Therefore, there exists moderate evidence that the increase in likelihood
for the t-Student model is due to the larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf> | def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r"""Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
When comparing two models define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
the lower BIC. The higher :math:`\Delta \mathrm{BIC}` is, the stronger the
evidence against the model with the higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
In addition, assume that the t model yielded a higher likelihood.
The question that the BIC is meant to answer is: "Is the increase in
likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
Therefore, there exists moderate evidence that the increase in likelihood
for the t-Student model is due to the larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params * np.log(n_samples) - 2.0 * log_likelihood |
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fittings
themselves do not tell much about which model better represents this
hypothetical source. Therefore, we are going to apply the BIC in order to
decide in favor of a model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +SKIP
30.644474706065466
Hence, there is very strong evidence that the Gaussian model represents
the data significantly better than the Box model. This is, of course,
expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<https://docs.astropy.org/en/stable/modeling> | def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fittings
themselves do not tell much about which model better represents this
hypothetical source. Therefore, we are going to apply the BIC in order to
decide in favor of a model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +SKIP
30.644474706065466
Hence, there is very strong evidence that the Gaussian model represents
the data significantly better than the Box model. This is, of course,
expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<https://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(
-0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples
) |
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
In case that the sample size is not "large enough" a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
:math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the
models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
models are being compared. One with six parameters (model 1) and another
with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
Therefore, we can strongly support model 1, which has the advantage of
more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf> | def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
In case that the sample size is not "large enough" a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
:math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the
models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
models are being compared. One with six parameters (model 1) and another
with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
Therefore, we can strongly support model 1, which has the advantage of
more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples / float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = 2.0 * (n_params - log_likelihood) + 2.0 * n_params * (n_params + 1.0) / (
n_samples - n_params - 1.0
)
return aic |
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
In case that the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
This example is based on the Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2) # changed this to noise level
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # and added another Gaussian
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2.4, .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-634.5257517810961
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
Hence, from the AIC values, we would prefer to choose the model g2_fit.
However, the model g3_fit still has considerable support, since the
difference in AIC is only about 2.4. The model g1_fit should be rejected.
References
----------
.. [1] Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc> | def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
In case that the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
This example is based on the Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2) # changed this to noise level
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # and added another Gaussian
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2.4, .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-634.5257517810961
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
Hence, from the AIC values, we would prefer to choose the model g2_fit.
However, the model g3_fit still has considerable support, since the
difference in AIC is only about 2.4. The model g1_fit should be rejected.
References
----------
.. [1] Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(
-0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples
) |
Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
of size 'n-1' from a measured sample of size 'n'. Basically, the i-th
sample, (1<=i<=n), is generated by means of removing the i-th measurement
of the original sample. Like the bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<https://arxiv.org/abs/1606.00497>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling> | def jackknife_resampling(data):
"""Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
of size 'n-1' from a measured sample of size 'n'. Basically, the i-th
sample, (1<=i<=n), is generated by means of removing the i-th measurement
of the original sample. Like the bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<https://arxiv.org/abs/1606.00497>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling>
"""
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
resamples = np.empty([n, n - 1])
for i in range(n):
resamples[i] = np.delete(data, i)
return resamples |
Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
data, e.g., sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
confidence_level : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : float or `~numpy.ndarray`
The i-th element is the bias-corrected "jackknifed" estimate.
bias : float or `~numpy.ndarray`
The i-th element is the jackknife bias.
std_err : float or `~numpy.ndarray`
The i-th element is the jackknife standard error.
conf_interval : ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[2., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
4.5
>>> bias
0.0
>>> stderr # doctest: +FLOAT_CMP
0.95742710775633832
>>> conf_interval
array([2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns | def jackknife_stats(data, statistic, confidence_level=0.95):
"""Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
data, e.g., sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
confidence_level : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : float or `~numpy.ndarray`
The i-th element is the bias-corrected "jackknifed" estimate.
bias : float or `~numpy.ndarray`
The i-th element is the jackknife bias.
std_err : float or `~numpy.ndarray`
The i-th element is the jackknife standard error.
conf_interval : ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[2., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
4.5
>>> bias
0.0
>>> stderr # doctest: +FLOAT_CMP
0.95742710775633832
>>> conf_interval
array([2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns
"""
# jackknife confidence interval
if not (0 < confidence_level < 1):
raise ValueError("confidence level must be in (0, 1).")
# make sure original data is proper
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
# Only import scipy if inputs are valid
from scipy.special import erfinv
resamples = jackknife_resampling(data)
stat_data = statistic(data)
jack_stat = np.apply_along_axis(statistic, 1, resamples)
mean_jack_stat = np.mean(jack_stat, axis=0)
# jackknife bias
bias = (n - 1) * (mean_jack_stat - stat_data)
# jackknife standard error
std_err = np.sqrt(
(n - 1)
* np.mean((jack_stat - mean_jack_stat) * (jack_stat - mean_jack_stat), axis=0)
)
# bias-corrected "jackknifed estimate"
estimate = stat_data - bias
z_score = np.sqrt(2.0) * erfinv(confidence_level)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval |
Bottleneck can only take integer axis, not tuple, so this function
takes all the axes to be operated on and combines them into the
first dimension of the array so that we can then use axis=0. | def _move_tuple_axes_first(array, axis):
"""
Bottleneck can only take integer axis, not tuple, so this function
takes all the axes to be operated on and combines them into the
first dimension of the array so that we can then use axis=0.
"""
# Figure out how many axes we are operating over
naxis = len(axis)
# Add remaining axes to the axis tuple
axis += tuple(i for i in range(array.ndim) if i not in axis)
# The new position of each axis is just in order
destination = tuple(range(array.ndim))
# Reorder the array so that the axes being operated on are at the
# beginning
array_new = np.moveaxis(array, axis, destination)
# Collapse the dimensions being operated on into a single dimension
# so that we can then use axis=0 with the bottleneck functions
array_new = array_new.reshape((-1,) + array_new.shape[naxis:])
return array_new |
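The reshaping trick can be verified with plain numpy: collapsing the tuple axes into the leading dimension and reducing along axis=0 matches reducing over the original tuple of axes. The sketch below re-implements the private helper rather than importing it.
import numpy as np

def move_tuple_axes_first(array, axis):
    # same logic as the private helper above
    naxis = len(axis)
    axis = axis + tuple(i for i in range(array.ndim) if i not in axis)
    array = np.moveaxis(array, axis, tuple(range(array.ndim)))
    return array.reshape((-1,) + array.shape[naxis:])

a = np.arange(24.0).reshape(2, 3, 4)
collapsed = move_tuple_axes_first(a, (0, 2))               # shape (8, 3)
np.allclose(collapsed.mean(axis=0), a.mean(axis=(0, 2)))   # -> True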
Bottleneck nanmean function that handles a tuple axis. | def _nanmean(array, axis=None):
"""Bottleneck nanmean function that handles a tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmean(array, axis=axis))
else:
return bottleneck.nanmean(array, axis=axis) |
Bottleneck nanmedian function that handles a tuple axis. | def _nanmedian(array, axis=None):
"""Bottleneck nanmedian function that handles a tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmedian(array, axis=axis))
else:
return bottleneck.nanmedian(array, axis=axis) |
Bottleneck nanstd function that handles a tuple axis. | def _nanstd(array, axis=None, ddof=0):
"""Bottleneck nanstd function that handles a tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanstd(array, axis=axis, ddof=ddof))
else:
return bottleneck.nanstd(array, axis=axis, ddof=ddof) |
mad_std function that ignores NaNs by default. | def _nanmadstd(array, axis=None):
"""mad_std function that ignores NaNs by default."""
return mad_std(array, axis=axis, ignore_nan=True) |
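These thin wrappers are only used when the optional ``bottleneck`` package is installed; the plain numpy/astropy calls below compute the same quantities (a sketch, not part of the original source).
import numpy as np
from astropy.stats import mad_std

a = np.arange(24.0).reshape(2, 3, 4)
a[0, 0, 0] = np.nan
np.nanmean(a, axis=(1, 2))                # what _nanmean computes
np.nanstd(a, axis=(1, 2), ddof=0)         # what _nanstd computes
mad_std(a, axis=(1, 2), ignore_nan=True)  # what _nanmadstd computes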
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
Invalid data values (i.e., NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this class. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
sigma_clip(sigma=4., cenfunc='mean', maxiters=None, axis=None,
... masked=False, return_bounds=True)
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` is returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). If `False` and
``masked=False``, the input data is modified in-place. The
default is `True`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Returns
-------
result : array-like
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values and where the input
mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked array
or array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array
is a flattened 1D `~numpy.ndarray` where the clipped values
have been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If the input
``data`` was a masked array, then the output `~numpy.ndarray`
will also contain ``np.nan`` where the input mask was `True`.
If ``return_bounds=True`` then the returned minimum and maximum
clipping thresholds will be `~numpy.ndarray`\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher. | def sigma_clip(
data,
sigma=3,
sigma_lower=None,
sigma_upper=None,
maxiters=5,
cenfunc="median",
stdfunc="std",
axis=None,
masked=True,
return_bounds=False,
copy=True,
grow=False,
):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
Invalid data values (i.e., NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this function. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
sigma_clip(sigma=4., cenfunc='mean', maxiters=None, axis=None,
... masked=False, return_bounds=True)
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` is returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). If `False` and
``masked=False``, the input data is modified in-place. The
default is `True`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Returns
-------
result : array-like
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values and where the input
mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked array
or array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array
is a flattened 1D `~numpy.ndarray` where the clipped values
have been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If the input
``data`` was a masked array, then the output `~numpy.ndarray`
will also contain ``np.nan`` where the input mask was `True`.
If ``return_bounds=True`` then the returned minimum and maximum
clipping thresholds will be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters,
cenfunc=cenfunc,
stdfunc=stdfunc,
grow=grow,
)
return sigclip(
data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy
) |
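# Editor's sketch (not part of the astropy source): a minimal usage example
# illustrating the clipping rule documented above -- values outside
# center - sigma*std .. center + sigma*std are masked -- together with the
# ``return_bounds`` output. The helper name is the editor's; it assumes only
# numpy and the ``sigma_clip`` function defined above.
def _example_sigma_clip_usage():
    import numpy as np

    rng = np.random.default_rng(0)
    data = np.append(rng.normal(0.0, 1.0, 1000), 50.0)  # one gross outlier
    clipped, lower, upper = sigma_clip(
        data, sigma=3, maxiters=None, masked=True, return_bounds=True
    )
    assert clipped.mask[-1]  # the 50.0 outlier is rejected
    assert lower < 0.0 < upper  # the bounds bracket the (near-zero) median
    return clipped, lower, upper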
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip | def sigma_clipped_stats(
data,
mask=None,
mask_value=None,
sigma=3.0,
sigma_lower=None,
sigma_upper=None,
maxiters=5,
cenfunc="median",
stdfunc="std",
std_ddof=0,
axis=None,
grow=False,
):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters,
cenfunc=cenfunc,
stdfunc=stdfunc,
grow=grow,
)
data_clipped = sigclip(
data, axis=axis, masked=False, return_bounds=False, copy=True
)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std |
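# Editor's sketch (not part of the astropy source): sigma_clipped_stats
# combines the clipping above with nan-aware statistics, so gross outliers
# barely move the returned mean/median/std. The helper name is the editor's
# and the tolerances are deliberately loose and seed-dependent.
def _example_sigma_clipped_stats_usage():
    import numpy as np

    rng = np.random.default_rng(1)
    data = np.append(rng.normal(10.0, 2.0, 500), [1e4, -1e4])  # two outliers
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, maxiters=5)
    assert abs(mean - 10.0) < 0.5
    assert abs(median - 10.0) < 0.5
    assert abs(std - 2.0) < 0.5
    return mean, median, std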
Test results for several fitness functions | def test_fitness_function_results():
"""Test results for several fitness functions"""
rng = np.random.default_rng(42)
# Event Data
t = rng.standard_normal(100)
edges = bayesian_blocks(t, fitness="events")
assert_allclose(edges, [-1.95103519, -1.01861547, 0.95442154, 2.1416476])
# Event data with repeats
t[80:] = t[:20]
edges = bayesian_blocks(t, fitness="events", p0=0.01)
assert_allclose(edges, [-1.95103519, -1.08663566, 1.17575682, 2.1416476])
# Regular event data
dt = 0.01
t = dt * np.arange(1000)
x = np.zeros(len(t))
N = len(t) // 10
x[rng.integers(0, len(t), N)] = 1
x[rng.integers(0, len(t) // 2, N)] = 1
edges = bayesian_blocks(t, x, fitness="regular_events", dt=dt)
assert_allclose(edges, [0, 4.365, 4.995, 9.99])
# Measured point data with errors
t = 100 * rng.random(20)
x = np.exp(-0.5 * (t - 50) ** 2)
sigma = 0.1
x_obs = x + sigma * rng.standard_normal(len(x))
edges = bayesian_blocks(t, x_obs, sigma, fitness="measures")
expected = [1.39362877, 44.30811196, 49.46626158, 54.37232704, 92.7562551]
assert_allclose(edges, expected)
# Optional arguments are passed (p0)
p0_sel = 0.05
edges = bayesian_blocks(t, x_obs, sigma, fitness="measures", p0=p0_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (ncp_prior)
ncp_prior_sel = 4 - np.log(73.53 * p0_sel * (len(t) ** -0.478))
edges = bayesian_blocks(
t, x_obs, sigma, fitness="measures", ncp_prior=ncp_prior_sel
)
assert_allclose(edges, expected)
# Optional arguments are passed (gamma)
gamma_sel = np.exp(-ncp_prior_sel)
edges = bayesian_blocks(t, x_obs, sigma, fitness="measures", gamma=gamma_sel)
assert_allclose(edges, expected) |
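# Editor's sketch (not part of the astropy source): the three prior
# parametrizations exercised above are tied together by the empirical
# relation from Scargle et al. (2013),
#     ncp_prior = 4 - ln(73.53 * p0 * N**-0.478)   and   gamma = exp(-ncp_prior),
# which is why the p0, ncp_prior and gamma calls all recover the same edges.
# The helper name and default arguments (N=20, p0=0.05, matching the test
# data above) are the editor's.
def _example_prior_parametrizations(N=20, p0=0.05):
    import numpy as np

    ncp_prior = 4 - np.log(73.53 * p0 * N**-0.478)
    gamma = np.exp(-ncp_prior)
    return ncp_prior, gamma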
Ensure that edges contains both endpoints when there are no change points | def test_zero_change_points(rseed=0):
"""
Ensure that edges contains both endpoints when there are no change points
"""
np.random.seed(rseed)
# Using the failed edge case from
# https://github.com/astropy/astropy/issues/8558
values = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
bins = bayesian_blocks(values)
assert values.min() == bins[0]
assert values.max() == bins[-1] |
Test a 2D array with the axis keyword. | def test_biweight_location_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_location(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_location(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_location(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi) |
Test a 3D array with the axis keyword. | def test_biweight_location_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_location(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi) |
Test a 3D array with a tuple axis keyword. | def test_biweight_location_axis_tuple():
"""Test a 3D array with a tuple axis keyword."""
data = np.arange(24).reshape(2, 3, 4)
data[0, 0] = 100.0
assert_equal(biweight_location(data, axis=0), biweight_location(data, axis=(0,)))
assert_equal(biweight_location(data, axis=-1), biweight_location(data, axis=(2,)))
assert_equal(
biweight_location(data, axis=(0, 1)), biweight_location(data, axis=(1, 0))
)
assert_equal(
biweight_location(data, axis=(0, 2)), biweight_location(data, axis=(0, -1))
)
assert_equal(
biweight_location(data, axis=(0, 1, 2)), biweight_location(data, axis=(2, 0, 1))
)
assert_equal(
biweight_location(data, axis=(0, 1, 2)), biweight_location(data, axis=None)
) |
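# Editor's sketch (not part of the astropy source): a direct 1D transcription
# of the Tukey biweight location with the usual tuning constant c=6, intended
# as a cross-check for the axis-handling tests above. For well-behaved 1D
# input (e.g. x = np.random.normal(5, 2, 100)) it should agree with
# biweight_location(x) to floating-point precision. The helper name is the
# editor's.
def _reference_biweight_location_1d(x, c=6.0):
    import numpy as np
    from astropy.stats import median_absolute_deviation

    x = np.asanyarray(x, dtype=float)
    med = np.median(x)
    mad = median_absolute_deviation(x)
    if mad == 0:
        return med
    u = (x - med) / (c * mad)
    w = (1.0 - u**2) ** 2
    w[np.abs(u) >= 1] = 0.0  # points beyond c*MAD get zero weight
    return med + np.sum(w * (x - med)) / np.sum(w)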
Test a 2D array with the axis keyword. | def test_biweight_midvariance_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_midvariance(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_midvariance(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_midvariance(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi) |
Test a 3D array with the axis keyword. | def test_biweight_midvariance_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_midvariance(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi) |
Test a 3D array with a tuple axis keyword. | def test_biweight_scale_axis_tuple():
"""Test a 3D array with a tuple axis keyword."""
data = np.arange(24).reshape(2, 3, 4)
data[0, 0] = 100.0
assert_equal(biweight_scale(data, axis=0), biweight_scale(data, axis=(0,)))
assert_equal(biweight_scale(data, axis=-1), biweight_scale(data, axis=(2,)))
assert_equal(biweight_scale(data, axis=(0, 1)), biweight_scale(data, axis=(1, 0)))
assert_equal(biweight_scale(data, axis=(0, 2)), biweight_scale(data, axis=(0, -1)))
assert_equal(
biweight_scale(data, axis=(0, 1, 2)), biweight_scale(data, axis=(2, 0, 1))
)
assert_equal(biweight_scale(data, axis=(0, 1, 2)), biweight_scale(data, axis=None))
assert_equal(
biweight_scale(data, axis=(0, 2), modify_sample_size=True),
biweight_scale(data, axis=(0, -1), modify_sample_size=True),
) |
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance. | def test_biweight_midcovariance_midvariance():
"""
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance.
"""
rng = np.random.default_rng(1)
d = rng.normal(0, 2, size=(100, 3))
cov = biweight_midcovariance(d)
var = [biweight_midvariance(a) for a in d]
assert_allclose(cov.diagonal(), var)
cov2 = biweight_midcovariance(d, modify_sample_size=True)
var2 = [biweight_midvariance(a, modify_sample_size=True) for a in d]
assert_allclose(cov2.diagonal(), var2) |
Test that biweight_midcovariance raises error with a 3D array. | def test_midcovariance_shape():
"""
Test that biweight_midcovariance raises error with a 3D array.
"""
d = np.ones(27).reshape(3, 3, 3)
with pytest.raises(ValueError, match=r"The input array must be 2D or 1D\."):
biweight_midcovariance(d) |
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array. | def test_midcovariance_M_shape():
"""
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array.
"""
d = [0, 1, 2]
M = [[0, 1], [2, 3]]
with pytest.raises(ValueError, match=r"M must be a scalar or 1D array\."):
biweight_midcovariance(d, M=M) |
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972). | def test_biweight_midcovariance_symmetric():
"""
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972).
"""
rng = np.random.default_rng(1)
d = rng.gamma(2, 2, size=(3, 500))
cov = biweight_midcovariance(d)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5) |
Regression test for #6905. | def test_biweight_32bit_runtime_warnings():
"""Regression test for #6905."""
with NumpyRNGContext(12345):
data = np.random.random(100).astype(np.float32)
data[50] = 30000.0
biweight_scale(data)
biweight_midvariance(data) |
Regression test to ensure ignore_nan=True gives same results for
ndarray and masked arrays that contain +/-inf. | def test_median_absolute_deviation_nans_masked():
"""
Regression test to ensure ignore_nan=True gives same results for
ndarray and masked arrays that contain +/-inf.
"""
data1 = np.array([1.0, np.nan, 2, np.inf])
data2 = np.ma.masked_array(data1, mask=False)
mad1 = funcs.median_absolute_deviation(data1, ignore_nan=True)
mad2 = funcs.median_absolute_deviation(data2, ignore_nan=True)
assert_equal(mad1, mad2)
# ensure that input masked array is not modified
assert np.isnan(data2[1]) |
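# Editor's sketch (not part of the astropy source): with ignore_nan=True the
# statistic reduces to the nan-aware form of its definition,
#     MAD = median(|x - median(x)|),
# which is what makes the plain-array and masked-array results above agree.
# The helper name is the editor's.
def _example_nan_aware_mad():
    import numpy as np

    x = np.array([1.0, np.nan, 2.0, np.inf])
    return np.nanmedian(np.abs(x - np.nanmedian(x)))  # 1.0 for this input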
Test intervals against those published in Gehrels 1986 | def test_poisson_conf_frequentist_confidence_gehrels():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array(
[
(0, 0, 1.841),
(1, 0.173, 3.300),
(2, 0.708, 4.638),
(3, 1.367, 5.918),
(4, 2.086, 7.163),
(5, 2.840, 8.382),
(6, 3.620, 9.584),
(7, 4.419, 10.77),
(8, 5.232, 11.95),
(9, 6.057, 13.11),
(10, 6.891, 14.27),
]
)
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0], interval="frequentist-confidence"),
nlh[:, 1:].T,
rtol=0.001,
atol=0.001,
) |
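# Editor's sketch (not part of the astropy source): the Gehrels (1986)
# 1-sigma limits tabulated above can be reproduced from the classical
# Poisson/chi-square duality,
#     lower = 0.5 * chi2.ppf(p_lo, 2n),   upper = 0.5 * chi2.ppf(p_hi, 2n + 2),
# with p_lo = norm.cdf(-1) ~ 0.1587 and p_hi = norm.cdf(1) ~ 0.8413.
# The helper name is the editor's; n=0 is excluded since chi2 needs df > 0.
def _example_gehrels_from_chi2(n=5):
    from scipy.stats import chi2, norm

    lower = 0.5 * chi2.ppf(norm.cdf(-1.0), 2 * n)  # ~2.840 for n=5
    upper = 0.5 * chi2.ppf(norm.cdf(1.0), 2 * (n + 1))  # ~8.382 for n=5
    return lower, upper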
Test intervals against those published in Gehrels 1986
Note: I think there's a typo (transposition of digits) in Gehrels 1986,
specifically for the two-sigma lower limit for 3 events; they claim
0.569 but this function returns 0.59623... | def test_poisson_conf_frequentist_confidence_gehrels_2sigma():
"""Test intervals against those published in Gehrels 1986
Note: I think there's a typo (transposition of digits) in Gehrels 1986,
specifically for the two-sigma lower limit for 3 events; they claim
0.569 but this function returns 0.59623...
"""
nlh = np.array(
[
(0, 2, 0, 3.783),
(1, 2, 2.30e-2, 5.683),
(2, 2, 0.230, 7.348),
(3, 2, 0.596, 8.902),
(4, 2, 1.058, 10.39),
(5, 2, 1.583, 11.82),
(6, 2, 2.153, 13.22),
(7, 2, 2.758, 14.59),
(8, 2, 3.391, 15.94),
(9, 2, 4.046, 17.27),
(10, 2, 4.719, 18.58),
]
)
assert_allclose(
funcs.poisson_conf_interval(
nlh[:, 0], sigma=2, interval="frequentist-confidence"
).T,
nlh[:, 2:],
rtol=0.01,
) |
Test intervals against those published in Gehrels 1986 | def test_poisson_conf_frequentist_confidence_gehrels_3sigma():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array(
[
(0, 3, 0, 6.608),
(1, 3, 1.35e-3, 8.900),
(2, 3, 5.29e-2, 10.87),
(3, 3, 0.212, 12.68),
(4, 3, 0.465, 14.39),
(5, 3, 0.792, 16.03),
(6, 3, 1.175, 17.62),
(7, 3, 1.603, 19.17),
(8, 3, 2.068, 20.69),
(9, 3, 2.563, 22.18),
(10, 3, 3.084, 23.64),
]
)
assert_allclose(
funcs.poisson_conf_interval(
nlh[:, 0], sigma=3, interval="frequentist-confidence"
).T,
nlh[:, 2:],
rtol=0.01,
verbose=True,
) |
Test that the lower-level routine gives the same number.
Test numbers are from Tables 1 and 3 in
Kraft, Burrows and Nousek,
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ | def test_scipy_poisson_limit():
"""Test that the lower-level routine gives the snae number.
Test numbers are from table1 1, 3 in
Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_
"""
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, 2.5, 0.99), (0, 10.67), rtol=1e-3
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(np.int32(5.0), 2.5, 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(np.int64(5.0), 2.5, 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, np.float32(2.5), 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, np.float64(2.5), 0.99),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, 2.5, np.float32(0.99)),
(0, 10.67),
rtol=1e-3,
)
assert_allclose(
funcs._scipy_kraft_burrows_nousek(5, 2.5, np.float64(0.99)),
(0, 10.67),
rtol=1e-3,
)
conf = funcs.poisson_conf_interval(
[5, 6],
"kraft-burrows-nousek",
background=[2.5, 2.0],
confidence_level=[0.99, 0.9],
)
assert_allclose(conf[:, 0], (0, 10.67), rtol=1e-3)
assert_allclose(conf[:, 1], (0.81, 8.99), rtol=5e-3) |
Check that the false positive probability is right
In particular, run m trials with N uniformly-distributed photons
and check that the number of false positives is consistent with
a binomial distribution. The more trials, the tighter the bounds
but the longer the runtime. | def test_uniform_binomial(N, m, p):
"""Check that the false positive probability is right
In particular, run m trials with N uniformly-distributed photons
and check that the number of false positives is consistent with
a binomial distribution. The more trials, the tighter the bounds
but the longer the runtime.
"""
from scipy.stats import binom
with NumpyRNGContext(1234):
fpps = np.array([funcs.kuiper(np.random.random(N))[1] for i in range(m)])
assert (fpps >= 0).all()
assert (fpps <= 1).all()
low = binom(n=m, p=p).ppf(0.01)
high = binom(n=m, p=p).ppf(0.99)
assert low < sum(fpps < p) < high |
This test is from:
http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/non_semi_models/
AkaikeLsg.pdf
Note that in there, they compute a "normalized BIC". Therefore, the
answers presented here are recalculated versions based on their values. | def test_bayesian_info_criterion_lsq():
"""This test is from:
http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/non_semi_models/
AkaikeLsg.pdf
Note that in there, they compute a "normalized BIC". Therefore, the
answers presented here are recalculated versions based on their values.
"""
n_samples = 25
n_params = (1, 2, 1)
ssr = (48959, 32512, 37980)
answer = (192.706, 185.706, 186.360)
assert_allclose(
answer[0],
bayesian_info_criterion_lsq(ssr[0], n_params[0], n_samples),
atol=1e-2,
)
assert_allclose(
answer[1],
bayesian_info_criterion_lsq(ssr[1], n_params[1], n_samples),
atol=1e-2,
)
assert_allclose(
answer[2],
bayesian_info_criterion_lsq(ssr[2], n_params[2], n_samples),
atol=1e-2,
) |
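# Editor's sketch (not part of the astropy source): the expected values above
# follow from the least-squares form of the BIC,
#     BIC = n_samples * ln(SSR / n_samples) + n_params * ln(n_samples),
# e.g. the first model gives 25*ln(48959/25) + 1*ln(25) ~= 192.7.
# The helper name and default arguments (the first test case) are the editor's.
def _example_bic_lsq(ssr=48959, n_params=1, n_samples=25):
    import numpy as np

    return n_samples * np.log(ssr / n_samples) + n_params * np.log(n_samples)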
For masked=False and axis=None, masked elements should be removed
from the result. | def test_axis_none():
"""
For masked=False and axis=None, masked elements should be removed
from the result.
"""
data = np.arange(10.0)
data[0] = 100
result = sigma_clip(data, masked=False, axis=None)
assert_equal(result, data[1:]) |
Test that the returned mask is not a scalar. | def test_sigma_clip_scalar_mask():
"""Test that the returned mask is not a scalar."""
data = np.arange(5)
result = sigma_clip(data, sigma=100.0, maxiters=1)
assert result.mask.shape != () |
Test list data with input mask or mask_value (#3268). | def test_sigma_clipped_stats():
"""Test list data with input mask or mask_value (#3268)."""
# test list data with mask
data = [0, 1]
mask = np.array([True, False])
result = sigma_clipped_stats(data, mask=mask)
# Check that the result of np.ma.median was converted to a scalar
assert isinstance(result[1], float)
assert result == (1.0, 1.0, 0.0)
result2 = sigma_clipped_stats(data, mask=mask, axis=0)
assert_equal(result, result2)
# test list data with mask_value
result = sigma_clipped_stats(data, mask_value=0.0)
assert isinstance(result[1], float)
assert result == (1.0, 1.0, 0.0)
# test without mask
data = [0, 2]
result = sigma_clipped_stats(data)
assert isinstance(result[1], float)
assert result == (1.0, 1.0, 1.0)
_data = np.arange(10)
data = np.ma.MaskedArray([_data, _data, 10 * _data])
mean = sigma_clip(data, axis=0, sigma=1).mean(axis=0)
assert_equal(mean, _data)
mean, median, stddev = sigma_clipped_stats(data, axis=0, sigma=1)
assert_equal(mean, _data)
assert_equal(median, _data)
assert_equal(stddev, np.zeros_like(_data)) |
Test sigma_clip of data containing invalid values. | def test_invalid_sigma_clip():
"""Test sigma_clip of data containing invalid values."""
data = np.ones((5, 5))
data[2, 2] = 1000
data[3, 4] = np.nan
data[1, 1] = np.inf
data_ma = np.ma.MaskedArray(data)
with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"):
result = sigma_clip(data)
with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"):
result_ma = sigma_clip(data_ma)
assert_equal(result.data, result_ma.data)
assert_equal(result.mask, result_ma.mask)
# Pre #4051 if data contains any NaN or infs sigma_clip returns the
# mask containing `False` only or TypeError if data also contains a
# masked value.
assert result.mask[2, 2]
assert result.mask[3, 4]
assert result.mask[1, 1]
with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"):
result2 = sigma_clip(data, axis=0)
assert result2.mask[1, 1]
assert result2.mask[3, 4]
with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"):
result3 = sigma_clip(data, axis=0, copy=False)
assert result3.mask[1, 1]
assert result3.mask[3, 4]
# stats along axis with all nans
data[0, :] = np.nan # row of all nans
with pytest.warns(AstropyUserWarning, match=r"Input data contains invalid values"):
_, minarr, maxarr = sigma_clip(data, axis=1, masked=False, return_bounds=True)
assert np.isnan(minarr[0])
assert np.isnan(maxarr[0]) |
Test that dimensions are expanded correctly even if axis is negative. | def test_sigmaclip_negative_axis():
"""Test that dimensions are expanded correctly even if axis is negative."""
data = np.ones((3, 4))
# without correct expand_dims this would raise a ValueError
sigma_clip(data, axis=-1) |
Make sure a fully masked array is returned when sigma clipping a
fully masked array. | def test_sigmaclip_fully_masked():
"""
Make sure a fully masked array is returned when sigma clipping a
fully masked array.
"""
data = np.ma.MaskedArray(
data=[[1.0, 0.0], [0.0, 1.0]], mask=[[True, True], [True, True]]
)
clipped_data = sigma_clip(data)
assert np.ma.allequal(data, clipped_data)
clipped_data = sigma_clip(data, masked=False)
assert not isinstance(clipped_data, np.ma.MaskedArray)
assert np.all(np.isnan(clipped_data))
clipped_data, low, high = sigma_clip(data, return_bounds=True)
assert np.ma.allequal(data, clipped_data)
assert np.isnan(low)
assert np.isnan(high) |
Make sure an empty masked array is returned when sigma clipping an
empty masked array. | def test_sigmaclip_empty_masked():
"""
Make sure an empty masked array is returned when sigma clipping an
empty masked array.
"""
data = np.ma.MaskedArray(data=[], mask=[])
clipped_data = sigma_clip(data)
assert np.ma.allequal(data, clipped_data)
clipped_data, low, high = sigma_clip(data, return_bounds=True)
assert np.ma.allequal(data, clipped_data)
assert np.isnan(low)
assert np.isnan(high) |
Make sure an empty array is returned when sigma clipping an empty
array. | def test_sigmaclip_empty():
"""
Make sure an empty array is returned when sigma clipping an empty
array.
"""
data = np.array([])
clipped_data = sigma_clip(data)
assert isinstance(clipped_data, np.ma.MaskedArray)
assert_equal(data, clipped_data.data)
clipped_data, low, high = sigma_clip(data, return_bounds=True)
assert_equal(data, clipped_data)
assert np.isnan(low)
assert np.isnan(high) |
Test sigma clipping over a subset of axes (issue #7227). | def test_sigma_clip_axis_tuple_3D():
"""Test sigma clipping over a subset of axes (issue #7227)."""
data = np.sin(0.78 * np.arange(27)).reshape(3, 3, 3)
mask = np.zeros_like(data, dtype=np.bool_)
data_t = np.rollaxis(data, 1, 0)
mask_t = np.rollaxis(mask, 1, 0)
# Loop over what was originally axis 1 and clip each plane directly:
for data_plane, mask_plane in zip(data_t, mask_t):
mean = data_plane.mean()
maxdev = 1.5 * data_plane.std()
mask_plane[:] = np.logical_or(
data_plane < mean - maxdev, data_plane > mean + maxdev
)
# Do the equivalent thing using sigma_clip:
result = sigma_clip(data, sigma=1.5, cenfunc=np.mean, maxiters=1, axis=(0, -1))
assert_equal(result.mask, mask) |
Test sigma_clipped_stats when the input array is completely masked. | def test_sigma_clippped_stats_all_masked():
"""
Test sigma_clipped_stats when the input array is completely masked.
"""
arr = np.ma.MaskedArray(np.arange(10), mask=True)
result = sigma_clipped_stats(arr)
assert result == (np.ma.masked, np.ma.masked, np.ma.masked)
arr = np.ma.MaskedArray(np.zeros(10), mask=False)
result = sigma_clipped_stats(arr, mask_value=0.0)
assert result == (np.ma.masked, np.ma.masked, np.ma.masked)
arr = np.ma.MaskedArray(np.arange(10), mask=False)
mask = arr < 20
result = sigma_clipped_stats(arr, mask=mask)
assert result == (np.ma.masked, np.ma.masked, np.ma.masked) |