diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eb7ca092582092e15d908d1b6c653a3b1392cfc Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b4c7403fd4f5aff2430b7e31a58b832898bb376 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66f90cf33c3ef2a6f83a7f2533e2bd2e18702a90 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5f2b90a026aaecbdc090b3d3234954ab29fce8ae --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Quansight-Labs +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91afdcedb180599a41758cdd8c03416cf6c20d76 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py @@ -0,0 +1,116 @@ +""" +.. note: + If you are looking for overrides for NumPy-specific methods, see the + documentation for :obj:`unumpy`. This page explains how to write + back-ends and multimethods. + +``uarray`` is built around a back-end protocol, and overridable multimethods. +It is necessary to define multimethods for back-ends to be able to override them. +See the documentation of :obj:`generate_multimethod` on how to write multimethods. + + + +Let's start with the simplest: + +``__ua_domain__`` defines the back-end *domain*. The domain consists of period- +separated string consisting of the modules you extend plus the submodule. For +example, if a submodule ``module2.submodule`` extends ``module1`` +(i.e., it exposes dispatchables marked as types available in ``module1``), +then the domain string should be ``"module1.module2.submodule"``. + + +For the purpose of this demonstration, we'll be creating an object and setting +its attributes directly. However, note that you can use a module or your own type +as a backend as well. + +>>> class Backend: pass +>>> be = Backend() +>>> be.__ua_domain__ = "ua_examples" + +It might be useful at this point to sidetrack to the documentation of +:obj:`generate_multimethod` to find out how to generate a multimethod +overridable by :obj:`uarray`. Needless to say, writing a backend and +creating multimethods are mostly orthogonal activities, and knowing +one doesn't necessarily require knowledge of the other, although it +is certainly helpful. We expect core API designers/specifiers to write the +multimethods, and implementors to override them. But, as is often the case, +similar people write both. + +Without further ado, here's an example multimethod: + +>>> import uarray as ua +>>> from uarray import Dispatchable +>>> def override_me(a, b): +... return Dispatchable(a, int), +>>> def override_replacer(args, kwargs, dispatchables): +... return (dispatchables[0], args[1]), {} +>>> overridden_me = ua.generate_multimethod( +... override_me, override_replacer, "ua_examples" +... ) + +Next comes the part about overriding the multimethod. This requires +the ``__ua_function__`` protocol, and the ``__ua_convert__`` +protocol. The ``__ua_function__`` protocol has the signature +``(method, args, kwargs)`` where ``method`` is the passed +multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables`` +is the list of converted dispatchables passed in. + +>>> def __ua_function__(method, args, kwargs): +... 
return method.__name__, args, kwargs +>>> be.__ua_function__ = __ua_function__ + +The other protocol of interest is the ``__ua_convert__`` protocol. It has the +signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion +between the formats should ideally be an ``O(1)`` operation, but it means that +no memory copying should be involved, only views of the existing data. + +>>> def __ua_convert__(dispatchables, coerce): +... for d in dispatchables: +... if d.type is int: +... if coerce and d.coercible: +... yield str(d.value) +... else: +... yield d.value +>>> be.__ua_convert__ = __ua_convert__ + +Now that we have defined the backend, the next thing to do is to call the multimethod. + +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +Note that the marked type has no effect on the actual type of the passed object. +We can also coerce the type of the input. + +>>> with ua.set_backend(be, coerce=True): +... overridden_me(1, "2") +... overridden_me(1.0, "2") +('override_me', ('1', '2'), {}) +('override_me', ('1.0', '2'), {}) + +Another feature is that if you remove ``__ua_convert__``, the arguments are not +converted at all and it's up to the backend to handle that. + +>>> del be.__ua_convert__ +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +You also have the option to return ``NotImplemented``, in which case processing moves on +to the next back-end, which in this case, doesn't exist. The same applies to +``__ua_convert__``. + +>>> be.__ua_function__ = lambda *a, **kw: NotImplemented +>>> with ua.set_backend(be): +... overridden_me(1, "2") +Traceback (most recent call last): + ... +uarray.BackendNotImplementedError: ... + +The last possibility is if we don't have ``__ua_convert__``, in which case the job is +left up to ``__ua_function__``, but putting things back into arrays after conversion +will not be possible. 
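+
+As a final, self-contained sketch (``MinimalBackend`` is an illustrative
+name, not part of the protocol), the smallest useful backend needs only a
+domain and a function handler:
+
+>>> class MinimalBackend:
+...     __ua_domain__ = "ua_examples"
+...     @staticmethod
+...     def __ua_function__(method, args, kwargs):
+...         return method.__name__, args, kwargs
+>>> with ua.set_backend(MinimalBackend):
+...     overridden_me(1, "2")
+('override_me', (1, '2'), {})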
+""" + +from ._backend import * +__version__ = '0.8.8.dev0+aa94c5a4.scipy' diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8722b65489f403325eabd1ad01da0d9d89a1ca46 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c12ff63ea5ffc2fe82470775d02e4a745ab7bef2 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..67da7d35ccea8ad26bd471b16e9400071a821cc0 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py @@ -0,0 +1,704 @@ +import typing +import types +import inspect +import functools +from . import _uarray +import copyreg +import pickle +import contextlib + +from ._uarray import ( # type: ignore + BackendNotImplementedError, + _Function, + _SkipBackendContext, + _SetBackendContext, + _BackendState, +) + +__all__ = [ + "set_backend", + "set_global_backend", + "skip_backend", + "register_backend", + "determine_backend", + "determine_backend_multi", + "clear_backends", + "create_multimethod", + "generate_multimethod", + "_Function", + "BackendNotImplementedError", + "Dispatchable", + "wrap_single_convertor", + "wrap_single_convertor_instance", + "all_of_type", + "mark_as", + "set_state", + "get_state", + "reset_state", + "_BackendState", + "_SkipBackendContext", + "_SetBackendContext", +] + +ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]] +ArgumentReplacerType = typing.Callable[ + [tuple, dict, tuple], tuple[tuple, dict] +] + +def unpickle_function(mod_name, qname, self_): + import importlib + + try: + module = importlib.import_module(mod_name) + qname = qname.split(".") + func = module + for q in qname: + func = getattr(func, q) + + if self_ is not None: + func = types.MethodType(func, self_) + + return func + except (ImportError, AttributeError) as e: + from pickle import UnpicklingError + + raise UnpicklingError from e + + +def pickle_function(func): + mod_name = getattr(func, "__module__", None) + qname = getattr(func, "__qualname__", None) + self_ = getattr(func, "__self__", None) + + try: + test = unpickle_function(mod_name, qname, self_) + except pickle.UnpicklingError: + test = None + + if test is not func: + raise pickle.PicklingError( + f"Can't pickle {func}: it's not the same object as {test}" + ) + + return unpickle_function, (mod_name, qname, self_) + + +def pickle_state(state): + return _uarray._BackendState._unpickle, 
state._pickle() + + +def pickle_set_backend_context(ctx): + return _SetBackendContext, ctx._pickle() + + +def pickle_skip_backend_context(ctx): + return _SkipBackendContext, ctx._pickle() + + +copyreg.pickle(_Function, pickle_function) +copyreg.pickle(_uarray._BackendState, pickle_state) +copyreg.pickle(_SetBackendContext, pickle_set_backend_context) +copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context) + + +def get_state(): + """ + Returns an opaque object containing the current state of all the backends. + + Can be used for synchronization between threads/processes. + + See Also + -------- + set_state + Sets the state returned by this function. + """ + return _uarray.get_state() + + +@contextlib.contextmanager +def reset_state(): + """ + Returns a context manager that resets all state once exited. + + See Also + -------- + set_state + Context manager that sets the backend state. + get_state + Gets a state to be set by this context manager. + """ + with set_state(get_state()): + yield + + +@contextlib.contextmanager +def set_state(state): + """ + A context manager that sets the state of the backends to one returned by :obj:`get_state`. + + See Also + -------- + get_state + Gets a state to be set by this context manager. + """ # noqa: E501 + old_state = get_state() + _uarray.set_state(state) + try: + yield + finally: + _uarray.set_state(old_state, True) + + +def create_multimethod(*args, **kwargs): + """ + Creates a decorator for generating multimethods. + + This function creates a decorator that can be used with an argument + extractor in order to generate a multimethod. Other than for the + argument extractor, all arguments are passed on to + :obj:`generate_multimethod`. + + See Also + -------- + generate_multimethod + Generates a multimethod. + """ + + def wrapper(a): + return generate_multimethod(a, *args, **kwargs) + + return wrapper + + +def generate_multimethod( + argument_extractor: ArgumentExtractorType, + argument_replacer: ArgumentReplacerType, + domain: str, + default: typing.Optional[typing.Callable] = None, +): + """ + Generates a multimethod. + + Parameters + ---------- + argument_extractor : ArgumentExtractorType + A callable which extracts the dispatchable arguments. Extracted arguments + should be marked by the :obj:`Dispatchable` class. It has the same signature + as the desired multimethod. + argument_replacer : ArgumentReplacerType + A callable with the signature (args, kwargs, dispatchables), which should also + return an (args, kwargs) pair with the dispatchables replaced inside the + args/kwargs. + domain : str + A string value indicating the domain of this multimethod. + default: Optional[Callable], optional + The default implementation of this multimethod, where ``None`` (the default) + specifies there is no default implementation. + + Examples + -------- + In this example, ``a`` is to be dispatched over, so we return it, while marking it + as an ``int``. + The trailing comma is needed because the args have to be returned as an iterable. + + >>> def override_me(a, b): + ... return Dispatchable(a, int), + + Next, we define the argument replacer that replaces the dispatchables inside + args/kwargs with the supplied ones. + + >>> def override_replacer(args, kwargs, dispatchables): + ... return (dispatchables[0], args[1]), {} + + Next, we define the multimethod. + + >>> overridden_me = generate_multimethod( + ... override_me, override_replacer, "ua_examples" + ... ) + + Notice that there's no default implementation, unless you supply one. 
+ + >>> overridden_me(1, "a") + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + >>> overridden_me2 = generate_multimethod( + ... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y) + ... ) + >>> overridden_me2(1, "a") + (1, 'a') + + See Also + -------- + uarray + See the module documentation for how to override the method by creating + backends. + """ + kw_defaults, arg_defaults, opts = get_defaults(argument_extractor) + ua_func = _Function( + argument_extractor, + argument_replacer, + domain, + arg_defaults, + kw_defaults, + default, + ) + + return functools.update_wrapper(ua_func, argument_extractor) + + +def set_backend(backend, coerce=False, only=False): + """ + A context manager that sets the preferred backend. + + Parameters + ---------- + backend + The backend to set. + coerce + Whether or not to coerce to a specific backend's types. Implies ``only``. + only + Whether or not this should be the last backend to try. + + See Also + -------- + skip_backend: A context manager that allows skipping of backends. + set_global_backend: Set a single, global backend for a domain. + """ + try: + return backend.__ua_cache__["set", coerce, only] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SetBackendContext(backend, coerce, only) + backend.__ua_cache__["set", coerce, only] = ctx + return ctx + + +def skip_backend(backend): + """ + A context manager that allows one to skip a given backend from processing + entirely. This allows one to use another backend's code in a library that + is also a consumer of the same backend. + + Parameters + ---------- + backend + The backend to skip. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + set_global_backend: Set a single, global backend for a domain. + """ + try: + return backend.__ua_cache__["skip"] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SkipBackendContext(backend) + backend.__ua_cache__["skip"] = ctx + return ctx + + +def get_defaults(f): + sig = inspect.signature(f) + kw_defaults = {} + arg_defaults = [] + opts = set() + for k, v in sig.parameters.items(): + if v.default is not inspect.Parameter.empty: + kw_defaults[k] = v.default + if v.kind in ( + inspect.Parameter.POSITIONAL_ONLY, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + ): + arg_defaults.append(v.default) + opts.add(k) + + return kw_defaults, tuple(arg_defaults), opts + + +def set_global_backend(backend, coerce=False, only=False, *, try_last=False): + """ + This utility method replaces the default backend for permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. This will be the first tried + backend outside the :obj:`set_backend` context manager. + + Note that this method is not thread-safe. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves, or by a reference + implementation, if one exists. + + Parameters + ---------- + backend + The backend to register. + coerce : bool + Whether to coerce input types when trying this backend. + only : bool + If ``True``, no more backends will be tried if this fails. + Implied by ``coerce=True``. + try_last : bool + If ``True``, the global backend is tried after registered backends. 
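+
+    For example (illustrative; ``be`` is a backend object such as the one
+    built in the module docstring):
+
+    >>> import uarray as ua
+    >>> ua.set_global_backend(be, try_last=True)  # doctest: +SKIP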
+ + See Also + -------- + set_backend: A context manager that allows setting of backends. + skip_backend: A context manager that allows skipping of backends. + """ + _uarray.set_global_backend(backend, coerce, only, try_last) + + +def register_backend(backend): + """ + This utility method sets registers backend for permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. + + Note that this method is not thread-safe. + + Parameters + ---------- + backend + The backend to register. + """ + _uarray.register_backend(backend) + + +def clear_backends(domain, registered=True, globals=False): + """ + This utility method clears registered backends. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves. + + .. warning:: + Do NOT use this method inside a multimethod call, or the + program is likely to crash. + + Parameters + ---------- + domain : Optional[str] + The domain for which to de-register backends. ``None`` means + de-register for all domains. + registered : bool + Whether or not to clear registered backends. See :obj:`register_backend`. + globals : bool + Whether or not to clear global backends. See :obj:`set_global_backend`. + + See Also + -------- + register_backend : Register a backend globally. + set_global_backend : Set a global backend. + """ + _uarray.clear_backends(domain, registered, globals) + + +class Dispatchable: + """ + A utility class which marks an argument with a specific dispatch type. + + + Attributes + ---------- + value + The value of the Dispatchable. + + type + The type of the Dispatchable. + + Examples + -------- + >>> x = Dispatchable(1, str) + >>> x + , value=1> + + See Also + -------- + all_of_type + Marks all unmarked parameters of a function. + + mark_as + Allows one to create a utility function to mark as a given type. + """ + + def __init__(self, value, dispatch_type, coercible=True): + self.value = value + self.type = dispatch_type + self.coercible = coercible + + def __getitem__(self, index): + return (self.type, self.value)[index] + + def __str__(self): + return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>" + + __repr__ = __str__ + + +def mark_as(dispatch_type): + """ + Creates a utility function to mark something as a specific type. + + Examples + -------- + >>> mark_int = mark_as(int) + >>> mark_int(1) + , value=1> + """ + return functools.partial(Dispatchable, dispatch_type=dispatch_type) + + +def all_of_type(arg_type): + """ + Marks all unmarked arguments as a given type. + + Examples + -------- + >>> @all_of_type(str) + ... def f(a, b): + ... return a, Dispatchable(b, int) + >>> f('a', 1) + (, value='a'>, + , value=1>) + """ + + def outer(func): + @functools.wraps(func) + def inner(*args, **kwargs): + extracted_args = func(*args, **kwargs) + return tuple( + Dispatchable(arg, arg_type) + if not isinstance(arg, Dispatchable) + else arg + for arg in extracted_args + ) + + return inner + + return outer + + +def wrap_single_convertor(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). 
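+
+    For example, a sketch of a per-element convertor (the conversion shown
+    is illustrative only):
+
+    >>> @wrap_single_convertor
+    ... def __ua_convert__(value, dispatch_type, coerce):
+    ...     if dispatch_type is int:
+    ...         return str(value) if coerce else value
+    ...     return NotImplemented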
+ """ + + @functools.wraps(convert_single) + def __ua_convert__(dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def wrap_single_convertor_instance(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). + """ + + @functools.wraps(convert_single) + def __ua_convert__(self, dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(self, d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False): + """Set the backend to the first active backend that supports ``value`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend` to ensure the same backend + is used everywhere in a block of multimethod calls. + + Parameters + ---------- + value + The value being tested + dispatch_type + The dispatch type associated with ``value``, aka + ":ref:`marking `". + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + + See Also + -------- + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting + different types, ``TypeA`` and ``TypeB``. Neither supporting the other type: + + >>> with ua.set_backend(ex.BackendA): + ... ex.call_multimethod(ex.TypeB(), ex.TypeB()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + Now consider a multimethod that creates a new object of ``TypeA``, or + ``TypeB`` depending on the active backend. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, ex.TypeA()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the + innermost with statement. So, ``call_multimethod`` fails since the types + don't match. + + Instead, we need to first find a backend suitable for all of our objects. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... x = ex.TypeA() + ... with ua.determine_backend(x, "mark", domain="ua_examples"): + ... res = ex.creation_multimethod() + ... 
ex.call_multimethod(res, x) + TypeA + + """ + dispatchables = (Dispatchable(value, dispatch_type, coerce),) + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) + + +def determine_backend_multi( + dispatchables, *, domain, only=True, coerce=False, **kwargs +): + """Set a backend supporting all ``dispatchables`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend_multi` to ensure the same + backend is used everywhere in a block of multimethod calls involving + multiple arrays. + + Parameters + ---------- + dispatchables: Sequence[Union[uarray.Dispatchable, Any]] + The dispatchables that must be supported + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + dispatch_type: Optional[Any] + The default dispatch type associated with ``dispatchables``, aka + ":ref:`marking `". + + See Also + -------- + determine_backend: For a single dispatch value + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + :func:`determine_backend` allows the backend to be set from a single + object. :func:`determine_backend_multi` allows multiple objects to be + checked simultaneously for support in the backend. Suppose we have a + ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call, + and a ``BackendBC`` that doesn't support ``TypeA``. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")], + ... domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, a, b) + TypeA + + This won't call ``BackendBC`` because it doesn't support ``TypeA``. + + We can also use leave out the ``ua.Dispatchable`` if we specify the + default ``dispatch_type`` for the ``dispatchables`` argument. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [a, b], dispatch_type="mark", domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... 
ex.call_multimethod(res, a, b) + TypeA + + """ + if "dispatch_type" in kwargs: + disp_type = kwargs.pop("dispatch_type") + dispatchables = tuple( + d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type) + for d in dispatchables + ) + else: + dispatchables = tuple(dispatchables) + if not all(isinstance(d, Dispatchable) for d in dispatchables): + raise TypeError("dispatchables must be instances of uarray.Dispatchable") + + if len(kwargs) != 0: + raise TypeError(f"Received unexpected keyword arguments: {kwargs}") + + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79712ae1bdb76eb6155e0823ec1992dd28bd0282 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py @@ -0,0 +1,22 @@ +""" +NumPy Array API compatibility library + +This is a small wrapper around NumPy and CuPy that is compatible with the +Array API standard https://data-apis.org/array-api/latest/. See also NEP 47 +https://numpy.org/neps/nep-0047-array-api-standard.html. + +Unlike array_api_strict, this is not a strict minimal implementation of the +Array API, but rather just an extension of the main NumPy namespace with +changes needed to be compliant with the Array API. See +https://numpy.org/doc/stable/reference/array_api.html for a full list of +changes. In particular, unlike array_api_strict, this package does not use a +separate Array object, but rather just uses numpy.ndarray directly. + +Library authors using the Array API may wish to test against array_api_strict +to ensure they are not using functionality outside of the standard, but prefer +this implementation for the default when working with NumPy arrays. 
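+
+A typical consumer pattern (sketch; ``your_function`` is a hypothetical
+name) resolves the namespace once, then uses it throughout:
+
+    import array_api_compat
+
+    def your_function(x):
+        xp = array_api_compat.array_namespace(x)
+        return xp.sum(x) / xp.max(x)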
+ +""" +__version__ = '1.5.1' + +from .common import * # noqa: F401, F403 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8ca1e90e06b9640ece2548c9da58126dbddda9 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e53618005c370f366624ceb85152147998a047c Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..170a1ff9e6459a8cd76f8f6f9b4bca1e894e9883 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py @@ -0,0 +1,46 @@ +""" +Internal helpers +""" + +from functools import wraps +from inspect import signature + +def get_xp(xp): + """ + Decorator to automatically replace xp with the corresponding array module. + + Use like + + import numpy as np + + @get_xp(np) + def func(x, /, xp, kwarg=None): + return xp.func(x, kwarg=kwarg) + + Note that xp must be a keyword argument and come after all non-keyword + arguments. + + """ + + def inner(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, xp=xp, **kwargs) + + sig = signature(f) + new_sig = sig.replace( + parameters=[sig.parameters[i] for i in sig.parameters if i != "xp"] + ) + + if wrapped_f.__doc__ is None: + wrapped_f.__doc__ = f"""\ +Array API compatibility wrapper for {f.__name__}. + +See the corresponding documentation in NumPy/CuPy and/or the array API +specification for more details. 
+ +""" + wrapped_f.__signature__ = new_sig + return wrapped_f + + return inner diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91ab1c405e1d700e2bab5a87fc70196a34871e7d --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py @@ -0,0 +1 @@ +from ._helpers import * # noqa: F403 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e6c28cfc7c3a7a0f65e18429f07e567149ee589 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..087639b52346b7a0e8d877b5b5c6062cad41ad72 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0d0658997d1829d1ff1e6ca9b722f7bd74b3388 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c22d08676bfe8c5532b2988411824f54584f51f Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..369b1b148868dca10bcf2cd6c3422ff730958cf2 Binary files /dev/null and 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70d3be81405e3f476a31d2f1b51b0f9cf7baf61e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..f998481cc70ffe207633b458a9197f9883f1bc30 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py @@ -0,0 +1,554 @@ +""" +These are functions that are just aliases of existing functions in NumPy. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + import numpy as np + from typing import Optional, Sequence, Tuple, Union + from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol + +from typing import NamedTuple +from types import ModuleType +import inspect + +from ._helpers import _check_device, is_numpy_array, array_namespace + +# These functions are modified from the NumPy versions. 
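+# Each wrapper takes the array module as an explicit ``xp`` argument; the
+# per-backend namespaces bind it (e.g. via ``get_xp`` in ``.._internal``).
+# Illustrative usage, assuming NumPy is installed:
+#
+#     import numpy as np
+#     arange(0, stop=10, step=2, xp=np)  # -> array([0, 2, 4, 6, 8])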
+ +def arange( + start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs) + +def empty( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.empty(shape, dtype=dtype, **kwargs) + +def empty_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.empty_like(x, dtype=dtype, **kwargs) + +def eye( + n_rows: int, + n_cols: Optional[int] = None, + /, + *, + xp, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs) + +def full( + shape: Union[int, Tuple[int, ...]], + fill_value: Union[int, float], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.full(shape, fill_value, dtype=dtype, **kwargs) + +def full_like( + x: ndarray, + /, + fill_value: Union[int, float], + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.full_like(x, fill_value, dtype=dtype, **kwargs) + +def linspace( + start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + endpoint: bool = True, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs) + +def ones( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.ones(shape, dtype=dtype, **kwargs) + +def ones_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.ones_like(x, dtype=dtype, **kwargs) + +def zeros( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.zeros(shape, dtype=dtype, **kwargs) + +def zeros_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.zeros_like(x, dtype=dtype, **kwargs) + +# np.unique() is split into four functions in the array API: +# unique_all, unique_counts, unique_inverse, and unique_values (this is done +# to remove polymorphic return types). + +# The functions here return namedtuples (np.unique() returns a normal +# tuple). + +# Note that these named tuples aren't actually part of the standard namespace, +# but I don't see any issue with exporting the names here regardless. 
+class UniqueAllResult(NamedTuple): + values: ndarray + indices: ndarray + inverse_indices: ndarray + counts: ndarray + + +class UniqueCountsResult(NamedTuple): + values: ndarray + counts: ndarray + + +class UniqueInverseResult(NamedTuple): + values: ndarray + inverse_indices: ndarray + + +def _unique_kwargs(xp): + # Older versions of NumPy and CuPy do not have equal_nan. Rather than + # trying to parse version numbers, just check if equal_nan is in the + # signature. + s = inspect.signature(xp.unique) + if 'equal_nan' in s.parameters: + return {'equal_nan': False} + return {} + +def unique_all(x: ndarray, /, xp) -> UniqueAllResult: + kwargs = _unique_kwargs(xp) + values, indices, inverse_indices, counts = xp.unique( + x, + return_counts=True, + return_index=True, + return_inverse=True, + **kwargs, + ) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueAllResult( + values, + indices, + inverse_indices, + counts, + ) + + +def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult: + kwargs = _unique_kwargs(xp) + res = xp.unique( + x, + return_counts=True, + return_index=False, + return_inverse=False, + **kwargs + ) + + return UniqueCountsResult(*res) + + +def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult: + kwargs = _unique_kwargs(xp) + values, inverse_indices = xp.unique( + x, + return_counts=False, + return_index=False, + return_inverse=True, + **kwargs, + ) + # xp.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueInverseResult(values, inverse_indices) + + +def unique_values(x: ndarray, /, xp) -> ndarray: + kwargs = _unique_kwargs(xp) + return xp.unique( + x, + return_counts=False, + return_index=False, + return_inverse=False, + **kwargs, + ) + +def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray: + if not copy and dtype == x.dtype: + return x + return x.astype(dtype=dtype, copy=copy) + +# These functions have different keyword argument names + +def std( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, # correction instead of ddof + keepdims: bool = False, + **kwargs, +) -> ndarray: + return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs) + +def var( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, # correction instead of ddof + keepdims: bool = False, + **kwargs, +) -> ndarray: + return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs) + +# Unlike transpose(), the axes argument to permute_dims() is required. +def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray: + return xp.transpose(x, axes) + +# Creation functions add the device keyword (which does nothing for NumPy) + +# asarray also adds the copy keyword +def _asarray( + obj: Union[ + ndarray, + bool, + int, + float, + NestedSequence[bool | int | float], + SupportsBufferProtocol, + ], + /, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + copy: "Optional[Union[bool, np._CopyMode]]" = None, + namespace = None, + **kwargs, +) -> ndarray: + """ + Array API compatibility wrapper for asarray(). + + See the corresponding documentation in NumPy/CuPy and/or the array API + specification for more details. 
+ + """ + if namespace is None: + try: + xp = array_namespace(obj, _use_compat=False) + except ValueError: + # TODO: What about lists of arrays? + raise ValueError("A namespace must be specified for asarray() with non-array input") + elif isinstance(namespace, ModuleType): + xp = namespace + elif namespace == 'numpy': + import numpy as xp + elif namespace == 'cupy': + import cupy as xp + elif namespace == 'dask.array': + import dask.array as xp + else: + raise ValueError("Unrecognized namespace argument to asarray()") + + _check_device(xp, device) + if is_numpy_array(obj): + import numpy as np + if hasattr(np, '_CopyMode'): + # Not present in older NumPys + COPY_FALSE = (False, np._CopyMode.IF_NEEDED) + COPY_TRUE = (True, np._CopyMode.ALWAYS) + else: + COPY_FALSE = (False,) + COPY_TRUE = (True,) + else: + COPY_FALSE = (False,) + COPY_TRUE = (True,) + if copy in COPY_FALSE and namespace != "dask.array": + # copy=False is not yet implemented in xp.asarray + raise NotImplementedError("copy=False is not yet implemented") + if (hasattr(xp, "ndarray") and isinstance(obj, xp.ndarray)): + if dtype is not None and obj.dtype != dtype: + copy = True + if copy in COPY_TRUE: + return xp.array(obj, copy=True, dtype=dtype) + return obj + elif namespace == "dask.array": + if copy in COPY_TRUE: + if dtype is None: + return obj.copy() + # Go through numpy, since dask copy is no-op by default + import numpy as np + obj = np.array(obj, dtype=dtype, copy=True) + return xp.array(obj, dtype=dtype) + else: + import dask.array as da + import numpy as np + if not isinstance(obj, da.Array): + obj = np.asarray(obj, dtype=dtype) + return da.from_array(obj) + return obj + + return xp.asarray(obj, dtype=dtype, **kwargs) + +# np.reshape calls the keyword argument 'newshape' instead of 'shape' +def reshape(x: ndarray, + /, + shape: Tuple[int, ...], + xp, copy: Optional[bool] = None, + **kwargs) -> ndarray: + if copy is True: + x = x.copy() + elif copy is False: + y = x.view() + y.shape = shape + return y + return xp.reshape(x, shape, **kwargs) + +# The descending keyword is new in sort and argsort, and 'kind' replaced with +# 'stable' +def argsort( + x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, + **kwargs, +) -> ndarray: + # Note: this keyword argument is different, and the default is different. + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. + if stable: + kwargs['kind'] = "stable" + if not descending: + res = xp.argsort(x, axis=axis, **kwargs) + else: + # As NumPy has no native descending sort, we imitate it here. Note that + # simply flipping the results of xp.argsort(x, ...) would not + # respect the relative order like it would in native descending sorts. + res = xp.flip( + xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs), + axis=axis, + ) + # Rely on flip()/argsort() to validate axis + normalised_axis = axis if axis >= 0 else x.ndim + axis + max_i = x.shape[normalised_axis] - 1 + res = max_i - res + return res + +def sort( + x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, + **kwargs, +) -> ndarray: + # Note: this keyword argument is different, and the default is different. + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. 
+ if stable: + kwargs['kind'] = "stable" + res = xp.sort(x, axis=axis, **kwargs) + if descending: + res = xp.flip(res, axis=axis) + return res + +# nonzero should error for zero-dimensional arrays +def nonzero(x: ndarray, /, xp, **kwargs) -> Tuple[ndarray, ...]: + if x.ndim == 0: + raise ValueError("nonzero() does not support zero-dimensional arrays") + return xp.nonzero(x, **kwargs) + +# sum() and prod() should always upcast when dtype=None +def sum( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs, +) -> ndarray: + # `xp.sum` already upcasts integers, but not floats or complexes + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def prod( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs, +) -> ndarray: + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.prod(x, dtype=dtype, axis=axis, keepdims=keepdims, **kwargs) + +# ceil, floor, and trunc return integers for integer inputs + +def ceil(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.ceil(x, **kwargs) + +def floor(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.floor(x, **kwargs) + +def trunc(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.trunc(x, **kwargs) + +# linear algebra functions + +def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.matmul(x1, x2, **kwargs) + +# Unlike transpose, matrix_transpose only transposes the last two axes. +def matrix_transpose(x: ndarray, /, xp) -> ndarray: + if x.ndim < 2: + raise ValueError("x must be at least 2-dimensional for matrix_transpose") + return xp.swapaxes(x, -1, -2) + +def tensordot(x1: ndarray, + x2: ndarray, + /, + xp, + *, + axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, + **kwargs, +) -> ndarray: + return xp.tensordot(x1, x2, axes=axes, **kwargs) + +def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: + if x1.shape[axis] != x2.shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + if hasattr(xp, 'broadcast_tensors'): + _broadcast = xp.broadcast_tensors + else: + _broadcast = xp.broadcast_arrays + + x1_ = xp.moveaxis(x1, axis, -1) + x2_ = xp.moveaxis(x2, axis, -1) + x1_, x2_ = _broadcast(x1_, x2_) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + +# isdtype is a new function in the 2022.12 array API specification. + +def isdtype( + dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp, + *, _tuple=True, # Disallow nested tuples +) -> bool: + """ + Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. + + Note that outside of this function, this compat library does not yet fully + support complex numbers. 
+ + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + for more details + """ + if isinstance(kind, tuple) and _tuple: + return any(isdtype(dtype, k, xp, _tuple=False) for k in kind) + elif isinstance(kind, str): + if kind == 'bool': + return dtype == xp.bool_ + elif kind == 'signed integer': + return xp.issubdtype(dtype, xp.signedinteger) + elif kind == 'unsigned integer': + return xp.issubdtype(dtype, xp.unsignedinteger) + elif kind == 'integral': + return xp.issubdtype(dtype, xp.integer) + elif kind == 'real floating': + return xp.issubdtype(dtype, xp.floating) + elif kind == 'complex floating': + return xp.issubdtype(dtype, xp.complexfloating) + elif kind == 'numeric': + return xp.issubdtype(dtype, xp.number) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + # This will allow things that aren't required by the spec, like + # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be + # more strict here to match the type annotation? Note that the + # array_api_strict implementation will be very strict. + return dtype == kind + +__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like', + 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like', + 'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', + 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', + 'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort', + 'sort', 'nonzero', 'sum', 'prod', 'ceil', 'floor', 'trunc', + 'matmul', 'matrix_transpose', 'tensordot', 'vecdot', 'isdtype'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_fft.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..666b0b1f84211052ac23be8a2a3009457b3b19d2 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_fft.py @@ -0,0 +1,183 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Union, Optional, Literal + +if TYPE_CHECKING: + from ._typing import Device, ndarray + from collections.abc import Sequence + +# Note: NumPy fft functions improperly upcast float32 and complex64 to +# complex128, which is why we require wrapping them all here. 
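+# For instance (illustrative, with xp=numpy):
+#
+#     np.fft.fft(np.ones(4, dtype=np.float32)).dtype  # complex128 (upcast)
+#     fft(np.ones(4, dtype=np.float32), np).dtype     # complex64 (preserved)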
+ +def fft( + x: ndarray, + /, + xp, + *, + n: Optional[int] = None, + axis: int = -1, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.fft(x, n=n, axis=axis, norm=norm) + if x.dtype in [xp.float32, xp.complex64]: + return res.astype(xp.complex64) + return res + +def ifft( + x: ndarray, + /, + xp, + *, + n: Optional[int] = None, + axis: int = -1, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.ifft(x, n=n, axis=axis, norm=norm) + if x.dtype in [xp.float32, xp.complex64]: + return res.astype(xp.complex64) + return res + +def fftn( + x: ndarray, + /, + xp, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.fftn(x, s=s, axes=axes, norm=norm) + if x.dtype in [xp.float32, xp.complex64]: + return res.astype(xp.complex64) + return res + +def ifftn( + x: ndarray, + /, + xp, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.ifftn(x, s=s, axes=axes, norm=norm) + if x.dtype in [xp.float32, xp.complex64]: + return res.astype(xp.complex64) + return res + +def rfft( + x: ndarray, + /, + xp, + *, + n: Optional[int] = None, + axis: int = -1, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.rfft(x, n=n, axis=axis, norm=norm) + if x.dtype == xp.float32: + return res.astype(xp.complex64) + return res + +def irfft( + x: ndarray, + /, + xp, + *, + n: Optional[int] = None, + axis: int = -1, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.irfft(x, n=n, axis=axis, norm=norm) + if x.dtype == xp.complex64: + return res.astype(xp.float32) + return res + +def rfftn( + x: ndarray, + /, + xp, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.rfftn(x, s=s, axes=axes, norm=norm) + if x.dtype == xp.float32: + return res.astype(xp.complex64) + return res + +def irfftn( + x: ndarray, + /, + xp, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.irfftn(x, s=s, axes=axes, norm=norm) + if x.dtype == xp.complex64: + return res.astype(xp.float32) + return res + +def hfft( + x: ndarray, + /, + xp, + *, + n: Optional[int] = None, + axis: int = -1, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.hfft(x, n=n, axis=axis, norm=norm) + if x.dtype in [xp.float32, xp.complex64]: + return res.astype(xp.float32) + return res + +def ihfft( + x: ndarray, + /, + xp, + *, + n: Optional[int] = None, + axis: int = -1, + norm: Literal["backward", "ortho", "forward"] = "backward", +) -> ndarray: + res = xp.fft.ihfft(x, n=n, axis=axis, norm=norm) + if x.dtype in [xp.float32, xp.complex64]: + return res.astype(xp.complex64) + return res + +def fftfreq(n: int, /, xp, *, d: float = 1.0, device: Optional[Device] = None) -> ndarray: + if device not in ["cpu", None]: + raise ValueError(f"Unsupported device {device!r}") + return xp.fft.fftfreq(n, d=d) + +def rfftfreq(n: int, /, xp, *, d: float = 1.0, device: Optional[Device] = None) -> ndarray: + if device not in ["cpu", None]: + raise ValueError(f"Unsupported device {device!r}") + return xp.fft.rfftfreq(n, d=d) + +def fftshift(x: ndarray, /, xp, *, axes: Union[int, Sequence[int]] = 
None) -> ndarray: + return xp.fft.fftshift(x, axes=axes) + +def ifftshift(x: ndarray, /, xp, *, axes: Union[int, Sequence[int]] = None) -> ndarray: + return xp.fft.ifftshift(x, axes=axes) + +__all__ = [ + "fft", + "ifft", + "fftn", + "ifftn", + "rfft", + "irfft", + "rfftn", + "irfftn", + "hfft", + "ihfft", + "fftfreq", + "rfftfreq", + "fftshift", + "ifftshift", +] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..25419c01c2a4870d15eb8806b92af41723814446 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py @@ -0,0 +1,515 @@ +""" +Various helper functions which are not part of the spec. + +Functions which start with an underscore are for internal use only but helpers +that are in __all__ are intended as additional helper functions for use by end +users of the compat library. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Optional, Union, Any + from ._typing import Array, Device + +import sys +import math +import inspect +import warnings + +def is_numpy_array(x): + """ + Return True if `x` is a NumPy array. + + This function does not import NumPy if it has not already been imported + and is therefore cheap to use. + + This also returns True for `ndarray` subclasses and NumPy scalar objects. + + See Also + -------- + + array_namespace + is_array_api_obj + is_cupy_array + is_torch_array + is_dask_array + is_jax_array + """ + # Avoid importing NumPy if it isn't already + if 'numpy' not in sys.modules: + return False + + import numpy as np + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, (np.ndarray, np.generic)) + +def is_cupy_array(x): + """ + Return True if `x` is a CuPy array. + + This function does not import CuPy if it has not already been imported + and is therefore cheap to use. + + This also returns True for `cupy.ndarray` subclasses and CuPy scalar objects. + + See Also + -------- + + array_namespace + is_array_api_obj + is_numpy_array + is_torch_array + is_dask_array + is_jax_array + """ + # Avoid importing NumPy if it isn't already + if 'cupy' not in sys.modules: + return False + + import cupy as cp + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, (cp.ndarray, cp.generic)) + +def is_torch_array(x): + """ + Return True if `x` is a PyTorch tensor. + + This function does not import PyTorch if it has not already been imported + and is therefore cheap to use. + + See Also + -------- + + array_namespace + is_array_api_obj + is_numpy_array + is_cupy_array + is_dask_array + is_jax_array + """ + # Avoid importing torch if it isn't already + if 'torch' not in sys.modules: + return False + + import torch + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, torch.Tensor) + +def is_dask_array(x): + """ + Return True if `x` is a dask.array Array. + + This function does not import dask if it has not already been imported + and is therefore cheap to use. 
+ + See Also + -------- + + array_namespace + is_array_api_obj + is_numpy_array + is_cupy_array + is_torch_array + is_jax_array + """ + # Avoid importing dask if it isn't already + if 'dask.array' not in sys.modules: + return False + + import dask.array + + return isinstance(x, dask.array.Array) + +def is_jax_array(x): + """ + Return True if `x` is a JAX array. + + This function does not import JAX if it has not already been imported + and is therefore cheap to use. + + + See Also + -------- + + array_namespace + is_array_api_obj + is_numpy_array + is_cupy_array + is_torch_array + is_dask_array + """ + # Avoid importing jax if it isn't already + if 'jax' not in sys.modules: + return False + + import jax + + return isinstance(x, jax.Array) + +def is_array_api_obj(x): + """ + Return True if `x` is an array API compatible array object. + + See Also + -------- + + array_namespace + is_numpy_array + is_cupy_array + is_torch_array + is_dask_array + is_jax_array + """ + return is_numpy_array(x) \ + or is_cupy_array(x) \ + or is_torch_array(x) \ + or is_dask_array(x) \ + or is_jax_array(x) \ + or hasattr(x, '__array_namespace__') + +def _check_api_version(api_version): + if api_version == '2021.12': + warnings.warn("The 2021.12 version of the array API specification was requested but the returned namespace is actually version 2022.12") + elif api_version is not None and api_version != '2022.12': + raise ValueError("Only the 2022.12 version of the array API specification is currently supported") + +def array_namespace(*xs, api_version=None, _use_compat=True): + """ + Get the array API compatible namespace for the arrays `xs`. + + Parameters + ---------- + xs: arrays + one or more arrays. + + api_version: str + The newest version of the spec that you need support for (currently + the compat library wrapped APIs support v2022.12). + + Returns + ------- + + out: namespace + The array API compatible namespace corresponding to the arrays in `xs`. + + Raises + ------ + TypeError + If `xs` contains arrays from different array libraries or contains a + non-array. + + + Typical usage is to pass the arguments of a function to + `array_namespace()` at the top of a function to get the corresponding + array API namespace: + + .. code:: python + + def your_function(x, y): + xp = array_api_compat.array_namespace(x, y) + # Now use xp as the array library namespace + return xp.mean(x, axis=0) + 2*xp.std(y, axis=0) + + + Wrapped array namespaces can also be imported directly. For example, + `array_namespace(np.array(...))` will return `array_api_compat.numpy`. + This function will also work for any array library not wrapped by + array-api-compat if it explicitly defines `__array_namespace__ + `__ + (the wrapped namespace is always preferred if it exists). + + See Also + -------- + + is_array_api_obj + is_numpy_array + is_cupy_array + is_torch_array + is_dask_array + is_jax_array + + """ + namespaces = set() + for x in xs: + if is_numpy_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import numpy as numpy_namespace + namespaces.add(numpy_namespace) + else: + import numpy as np + namespaces.add(np) + elif is_cupy_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import cupy as cupy_namespace + namespaces.add(cupy_namespace) + else: + import cupy as cp + namespaces.add(cp) + elif is_torch_array(x): + _check_api_version(api_version) + if _use_compat: + from .. 
import torch as torch_namespace
+                namespaces.add(torch_namespace)
+            else:
+                import torch
+                namespaces.add(torch)
+        elif is_dask_array(x):
+            _check_api_version(api_version)
+            if _use_compat:
+                from ..dask import array as dask_namespace
+                namespaces.add(dask_namespace)
+            else:
+                raise TypeError("_use_compat cannot be False if input array is a dask array!")
+        elif is_jax_array(x):
+            _check_api_version(api_version)
+            # jax.experimental.array_api is already an array namespace. We do
+            # not have a wrapper submodule for it.
+            import jax.experimental.array_api as jnp
+            namespaces.add(jnp)
+        elif hasattr(x, '__array_namespace__'):
+            namespaces.add(x.__array_namespace__(api_version=api_version))
+        else:
+            # TODO: Support Python scalars?
+            raise TypeError(f"{type(x).__name__} is not a supported array type")
+
+    if not namespaces:
+        raise TypeError("Unrecognized array input")
+
+    if len(namespaces) != 1:
+        raise TypeError(f"Multiple namespaces for array inputs: {namespaces}")
+
+    xp, = namespaces
+
+    return xp
+
+# backwards compatibility alias
+get_namespace = array_namespace
+
+def _check_device(xp, device):
+    if xp == sys.modules.get('numpy'):
+        if device not in ["cpu", None]:
+            raise ValueError(f"Unsupported device for NumPy: {device!r}")
+
+# Placeholder object to represent the dask device
+# when the array backend is not the CPU.
+# (since it is not easy to tell which device a dask array is on)
+class _dask_device:
+    def __repr__(self):
+        return "DASK_DEVICE"
+
+_DASK_DEVICE = _dask_device()
+
+# device() is not on numpy.ndarray or dask.array and to_device() is not on numpy.ndarray
+# or cupy.ndarray. They are not included in array objects of this library
+# because this library just reuses the respective ndarray classes without
+# wrapping or subclassing them. These helper functions can be used instead of
+# the wrapper functions for libraries that need to support both NumPy/CuPy and
+# other libraries that use devices.
+def device(x: Array, /) -> Device:
+    """
+    Hardware device the array data resides on.
+
+    This is equivalent to `x.device` according to the `standard
+    `__.
+    This helper is included because some array libraries either do not have
+    the `device` attribute or include it with an incompatible API.
+
+    Parameters
+    ----------
+    x: array
+        array instance from an array API compatible library.
+
+    Returns
+    -------
+    out: device
+        a ``device`` object (see the `Device Support `__
+        section of the array API specification).
+
+    Notes
+    -----
+
+    For NumPy the device is always `"cpu"`. For Dask, the device is always a
+    special `DASK_DEVICE` object.
+
+    See Also
+    --------
+
+    to_device : Move array data to a different device.
+
+    """
+    if is_numpy_array(x):
+        return "cpu"
+    elif is_dask_array(x):
+        # Peek at the metadata of the dask array to determine type
+        try:
+            import numpy as np
+            if isinstance(x._meta, np.ndarray):
+                # Must be on CPU since backed by numpy
+                return "cpu"
+        except ImportError:
+            pass
+        return _DASK_DEVICE
+    elif is_jax_array(x):
+        # JAX has .device() as a method, but it is being deprecated so that it
+        # can become a property, in accordance with the standard. In order for
+        # this function to not break when JAX makes the flip, we check for
+        # both here.
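+        # For example (hypothetical, depending on the installed JAX version):
+        # on older releases x.device is a bound method and x.device() returns
+        # the Device, while on newer releases x.device is the Device itself.
+        # The check below handles both without pinning a JAX version.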
+ if inspect.ismethod(x.device): + return x.device() + else: + return x.device + return x.device + +# Based on cupy.array_api.Array.to_device +def _cupy_to_device(x, device, /, stream=None): + import cupy as cp + from cupy.cuda import Device as _Device + from cupy.cuda import stream as stream_module + from cupy_backends.cuda.api import runtime + + if device == x.device: + return x + elif device == "cpu": + # allowing us to use `to_device(x, "cpu")` + # is useful for portable test swapping between + # host and device backends + return x.get() + elif not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + else: + # see cupy/cupy#5985 for the reason how we handle device/stream here + prev_device = runtime.getDevice() + prev_stream: stream_module.Stream = None + if stream is not None: + prev_stream = stream_module.get_current_stream() + # stream can be an int as specified in __dlpack__, or a CuPy stream + if isinstance(stream, int): + stream = cp.cuda.ExternalStream(stream) + elif isinstance(stream, cp.cuda.Stream): + pass + else: + raise ValueError('the input stream is not recognized') + stream.use() + try: + runtime.setDevice(device.id) + arr = x.copy() + finally: + runtime.setDevice(prev_device) + if stream is not None: + prev_stream.use() + return arr + +def _torch_to_device(x, device, /, stream=None): + if stream is not None: + raise NotImplementedError + return x.to(device) + +def to_device(x: Array, device: Device, /, *, stream: Optional[Union[int, Any]] = None) -> Array: + """ + Copy the array from the device on which it currently resides to the specified ``device``. + + This is equivalent to `x.to_device(device, stream=stream)` according to + the `standard + `__. + This helper is included because some array libraries do not have the + `to_device` method. + + Parameters + ---------- + + x: array + array instance from an array API compatible library. + + device: device + a ``device`` object (see the `Device Support `__ + section of the array API specification). + + stream: Optional[Union[int, Any]] + stream object to use during copy. In addition to the types supported + in ``array.__dlpack__``, implementations may choose to support any + library-specific stream object with the caveat that any code using + such an object would not be portable. + + Returns + ------- + + out: array + an array with the same data and data type as ``x`` and located on the + specified ``device``. + + Notes + ----- + + For NumPy, this function effectively does nothing since the only supported + device is the CPU. For CuPy, this method supports CuPy CUDA + :external+cupy:class:`Device ` and + :external+cupy:class:`Stream ` objects. For PyTorch, + this is the same as :external+torch:meth:`x.to(device) ` + (the ``stream`` argument is not supported in PyTorch). + + See Also + -------- + + device : Hardware device the array data resides on. + + """ + if is_numpy_array(x): + if stream is not None: + raise ValueError("The stream argument to to_device() is not supported") + if device == 'cpu': + return x + raise ValueError(f"Unsupported device {device!r}") + elif is_cupy_array(x): + # cupy does not yet have to_device + return _cupy_to_device(x, device, stream=stream) + elif is_torch_array(x): + return _torch_to_device(x, device, stream=stream) + elif is_dask_array(x): + if stream is not None: + raise ValueError("The stream argument to to_device() is not supported") + # TODO: What if our array is on the GPU already? 
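+        # As written, this branch assumes a CPU-backed dask array:
+        # to_device(x, "cpu") is a no-op and any other device is rejected,
+        # mirroring the NumPy branch above.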
+ if device == 'cpu': + return x + raise ValueError(f"Unsupported device {device!r}") + elif is_jax_array(x): + # This import adds to_device to x + import jax.experimental.array_api # noqa: F401 + return x.to_device(device, stream=stream) + return x.to_device(device, stream=stream) + +def size(x): + """ + Return the total number of elements of x. + + This is equivalent to `x.size` according to the `standard + `__. + This helper is included because PyTorch defines `size` in an + :external+torch:meth:`incompatible way `. + + """ + if None in x.shape: + return None + return math.prod(x.shape) + +__all__ = [ + "array_namespace", + "device", + "get_namespace", + "is_array_api_obj", + "is_cupy_array", + "is_dask_array", + "is_jax_array", + "is_numpy_array", + "is_torch_array", + "size", + "to_device", +] + +_all_ignore = ['sys', 'math', 'inspect', 'warnings'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..dc2b69d87b826717a6bea9dc27610b93a6908061 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py @@ -0,0 +1,161 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, NamedTuple +if TYPE_CHECKING: + from typing import Literal, Optional, Tuple, Union + from ._typing import ndarray + +import math + +import numpy as np +if np.__version__[0] == "2": + from numpy.lib.array_utils import normalize_axis_tuple +else: + from numpy.core.numeric import normalize_axis_tuple + +from ._aliases import matmul, matrix_transpose, tensordot, vecdot, isdtype +from .._internal import get_xp + +# These are in the main NumPy namespace but not in numpy.linalg +def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray: + return xp.cross(x1, x2, axis=axis, **kwargs) + +def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.outer(x1, x2, **kwargs) + +class EighResult(NamedTuple): + eigenvalues: ndarray + eigenvectors: ndarray + +class QRResult(NamedTuple): + Q: ndarray + R: ndarray + +class SlogdetResult(NamedTuple): + sign: ndarray + logabsdet: ndarray + +class SVDResult(NamedTuple): + U: ndarray + S: ndarray + Vh: ndarray + +# These functions are the same as their NumPy counterparts except they return +# a namedtuple. +def eigh(x: ndarray, /, xp, **kwargs) -> EighResult: + return EighResult(*xp.linalg.eigh(x, **kwargs)) + +def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced', + **kwargs) -> QRResult: + return QRResult(*xp.linalg.qr(x, mode=mode, **kwargs)) + +def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult: + return SlogdetResult(*xp.linalg.slogdet(x, **kwargs)) + +def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult: + return SVDResult(*xp.linalg.svd(x, full_matrices=full_matrices, **kwargs)) + +# These functions have additional keyword arguments + +# The upper keyword argument is new from NumPy +def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray: + L = xp.linalg.cholesky(x, **kwargs) + if upper: + U = get_xp(xp)(matrix_transpose)(L) + if get_xp(xp)(isdtype)(U.dtype, 'complex floating'): + U = xp.conj(U) + return U + return L + +# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy. 
+# Note that it has a different semantic meaning from tol and rcond. +def matrix_rank(x: ndarray, + /, + xp, + *, + rtol: Optional[Union[float, ndarray]] = None, + **kwargs) -> ndarray: + # this is different from xp.linalg.matrix_rank, which supports 1 + # dimensional arrays. + if x.ndim < 2: + raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional") + S = get_xp(xp)(svdvals)(x, **kwargs) + if rtol is None: + tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * xp.finfo(S.dtype).eps + else: + # this is different from xp.linalg.matrix_rank, which does not + # multiply the tolerance by the largest singular value. + tol = S.max(axis=-1, keepdims=True)*xp.asarray(rtol)[..., xp.newaxis] + return xp.count_nonzero(S > tol, axis=-1) + +def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray: + # this is different from xp.linalg.pinv, which does not multiply the + # default tolerance by max(M, N). + if rtol is None: + rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps + return xp.linalg.pinv(x, rcond=rtol, **kwargs) + +# These functions are new in the array API spec + +def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray: + return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + +# svdvals is not in NumPy (but it is in SciPy). It is equivalent to +# xp.linalg.svd(compute_uv=False). +def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]: + return xp.linalg.svd(x, compute_uv=False) + +def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray: + # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or + # when axis=None and the input is 2-D, so to force a vector norm, we make + # it so the input is 1-D (for axis=None), or reshape so that norm is done + # on a single dimension. + if axis is None: + # Note: xp.linalg.norm() doesn't handle 0-D arrays + _x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # xp.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + _x = xp.transpose(x, newshape).reshape( + (math.prod([x.shape[i] for i in axis]), *[x.shape[i] for i in rest])) + _axis = 0 + else: + _x = x + _axis = axis + + res = xp.linalg.norm(_x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. 
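+        # For example (hypothetical shapes): with x.shape == (2, 3, 4) and
+        # axis=(0, 2), res comes back with shape (3,); the loop below rebuilds
+        # the shape as (1, 3, 1), exactly what keepdims=True would have given.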
+ shape = list(x.shape) + _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim) + for i in _axis: + shape[i] = 1 + res = xp.reshape(res, tuple(shape)) + + return res + +# xp.diagonal and xp.trace operate on the first two axes whereas these +# operates on the last two + +def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray: + return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs) + +def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray: + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.asarray(xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs)) + +__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult', + 'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet', + 'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm', + 'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal', + 'trace'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..07f3850d21fade94814f9fe1e638286c72a1c552 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +__all__ = [ + "NestedSequence", + "SupportsBufferProtocol", +] + +from typing import ( + Any, + TypeVar, + Protocol, +) + +_T_co = TypeVar("_T_co", covariant=True) + +class NestedSequence(Protocol[_T_co]): + def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ... + def __len__(self, /) -> int: ... + +SupportsBufferProtocol = Any + +Array = Any +Device = Any diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7968d68d3d0e9d7fd1cccd72a4980217bc54124a --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py @@ -0,0 +1,16 @@ +from cupy import * # noqa: F403 + +# from cupy import * doesn't overwrite these builtin names +from cupy import abs, max, min, round # noqa: F401 + +# These imports may overwrite names from the import * above. 
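+#
+# (Usage aside, a hypothetical session — assuming CuPy is installed; in this
+# vendored copy the top-level package is scipy._lib.array_api_compat:
+#
+#   >>> from scipy._lib import array_api_compat
+#   >>> import cupy as cp
+#   >>> xp = array_api_compat.array_namespace(cp.asarray([1.0]))
+#   >>> xp is array_api_compat.cupy
+#   True
+# )
+#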
+from ._aliases import * # noqa: F403 + +# See the comment in the numpy __init__.py +__import__(__package__ + '.linalg') + +__import__(__package__ + '.fft') + +from ..common._helpers import * # noqa: F401,F403 + +__array_api_version__ = '2022.12' diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b49ce6016a0880a1c0f58679268aaf6834503f6 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2035bf5e33ed5b09520817319600119fc892d68 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd53c463672432bd057cbdd41744baeba280bf22 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6c0774a26408b960587586c4c29b6161eb7433f Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beb982c6529bd8652fc397ddea8c23a921a0ed13 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py new file mode 100644 index 
0000000000000000000000000000000000000000..b9364ac691ab734bd6703d51a7a30dab13e545c4 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from functools import partial + +import cupy as cp + +from ..common import _aliases +from .._internal import get_xp + +asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy') +asarray.__doc__ = _aliases._asarray.__doc__ +del partial + +bool = cp.bool_ + +# Basic renames +acos = cp.arccos +acosh = cp.arccosh +asin = cp.arcsin +asinh = cp.arcsinh +atan = cp.arctan +atan2 = cp.arctan2 +atanh = cp.arctanh +bitwise_left_shift = cp.left_shift +bitwise_invert = cp.invert +bitwise_right_shift = cp.right_shift +concat = cp.concatenate +pow = cp.power + +arange = get_xp(cp)(_aliases.arange) +empty = get_xp(cp)(_aliases.empty) +empty_like = get_xp(cp)(_aliases.empty_like) +eye = get_xp(cp)(_aliases.eye) +full = get_xp(cp)(_aliases.full) +full_like = get_xp(cp)(_aliases.full_like) +linspace = get_xp(cp)(_aliases.linspace) +ones = get_xp(cp)(_aliases.ones) +ones_like = get_xp(cp)(_aliases.ones_like) +zeros = get_xp(cp)(_aliases.zeros) +zeros_like = get_xp(cp)(_aliases.zeros_like) +UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult) +UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult) +UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult) +unique_all = get_xp(cp)(_aliases.unique_all) +unique_counts = get_xp(cp)(_aliases.unique_counts) +unique_inverse = get_xp(cp)(_aliases.unique_inverse) +unique_values = get_xp(cp)(_aliases.unique_values) +astype = _aliases.astype +std = get_xp(cp)(_aliases.std) +var = get_xp(cp)(_aliases.var) +permute_dims = get_xp(cp)(_aliases.permute_dims) +reshape = get_xp(cp)(_aliases.reshape) +argsort = get_xp(cp)(_aliases.argsort) +sort = get_xp(cp)(_aliases.sort) +nonzero = get_xp(cp)(_aliases.nonzero) +sum = get_xp(cp)(_aliases.sum) +prod = get_xp(cp)(_aliases.prod) +ceil = get_xp(cp)(_aliases.ceil) +floor = get_xp(cp)(_aliases.floor) +trunc = get_xp(cp)(_aliases.trunc) +matmul = get_xp(cp)(_aliases.matmul) +matrix_transpose = get_xp(cp)(_aliases.matrix_transpose) +tensordot = get_xp(cp)(_aliases.tensordot) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. 
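+# In other words: prefer the native implementation when the installed library
+# already provides it, and only fall back to the generic wrapper from
+# ..common._aliases (bound to the cp namespace via get_xp) otherwise.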
+if hasattr(cp, 'vecdot'): + vecdot = cp.vecdot +else: + vecdot = get_xp(cp)(_aliases.vecdot) +if hasattr(cp, 'isdtype'): + isdtype = cp.isdtype +else: + isdtype = get_xp(cp)(_aliases.isdtype) + +__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos', + 'acosh', 'asin', 'asinh', 'atan', 'atan2', + 'atanh', 'bitwise_left_shift', 'bitwise_invert', + 'bitwise_right_shift', 'concat', 'pow'] + +_all_ignore = ['cp', 'get_xp'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d9aab67e52f3300cd96c3d0e701d1604eaccbb --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +__all__ = [ + "ndarray", + "Device", + "Dtype", +] + +import sys +from typing import ( + Union, + TYPE_CHECKING, +) + +from cupy import ( + ndarray, + dtype, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, +) + +from cupy.cuda.device import Device + +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/fft.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..307e0f7277710693063ef8c4d2cd7893275ad44a --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/fft.py @@ -0,0 +1,36 @@ +from cupy.fft import * # noqa: F403 +# cupy.fft doesn't have __all__. If it is added, replace this with +# +# from cupy.fft import __all__ as linalg_all +_n = {} +exec('from cupy.fft import *', _n) +del _n['__builtins__'] +fft_all = list(_n) +del _n + +from ..common import _fft +from .._internal import get_xp + +import cupy as cp + +fft = get_xp(cp)(_fft.fft) +ifft = get_xp(cp)(_fft.ifft) +fftn = get_xp(cp)(_fft.fftn) +ifftn = get_xp(cp)(_fft.ifftn) +rfft = get_xp(cp)(_fft.rfft) +irfft = get_xp(cp)(_fft.irfft) +rfftn = get_xp(cp)(_fft.rfftn) +irfftn = get_xp(cp)(_fft.irfftn) +hfft = get_xp(cp)(_fft.hfft) +ihfft = get_xp(cp)(_fft.ihfft) +fftfreq = get_xp(cp)(_fft.fftfreq) +rfftfreq = get_xp(cp)(_fft.rfftfreq) +fftshift = get_xp(cp)(_fft.fftshift) +ifftshift = get_xp(cp)(_fft.ifftshift) + +__all__ = fft_all + _fft.__all__ + +del get_xp +del cp +del fft_all +del _fft diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..7fcdd498e0073ada094a20a9ae423e01cb0f8ceb --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py @@ -0,0 +1,49 @@ +from cupy.linalg import * # noqa: F403 +# cupy.linalg doesn't have __all__. 
If it is added, replace this with +# +# from cupy.linalg import __all__ as linalg_all +_n = {} +exec('from cupy.linalg import *', _n) +del _n['__builtins__'] +linalg_all = list(_n) +del _n + +from ..common import _linalg +from .._internal import get_xp + +import cupy as cp + +# These functions are in both the main and linalg namespaces +from ._aliases import matmul, matrix_transpose, tensordot, vecdot # noqa: F401 + +cross = get_xp(cp)(_linalg.cross) +outer = get_xp(cp)(_linalg.outer) +EighResult = _linalg.EighResult +QRResult = _linalg.QRResult +SlogdetResult = _linalg.SlogdetResult +SVDResult = _linalg.SVDResult +eigh = get_xp(cp)(_linalg.eigh) +qr = get_xp(cp)(_linalg.qr) +slogdet = get_xp(cp)(_linalg.slogdet) +svd = get_xp(cp)(_linalg.svd) +cholesky = get_xp(cp)(_linalg.cholesky) +matrix_rank = get_xp(cp)(_linalg.matrix_rank) +pinv = get_xp(cp)(_linalg.pinv) +matrix_norm = get_xp(cp)(_linalg.matrix_norm) +svdvals = get_xp(cp)(_linalg.svdvals) +diagonal = get_xp(cp)(_linalg.diagonal) +trace = get_xp(cp)(_linalg.trace) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. +if hasattr(cp.linalg, 'vector_norm'): + vector_norm = cp.linalg.vector_norm +else: + vector_norm = get_xp(cp)(_linalg.vector_norm) + +__all__ = linalg_all + _linalg.__all__ + +del get_xp +del cp +del linalg_all +del _linalg diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38ac22c2d1daf5d63b724bd468db6053f18e742b Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03e0cd7239cb882423a2fcdcdbfd4781cdf04612 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py @@ -0,0 +1,8 @@ +from dask.array import * # noqa: F403 + +# These imports may overwrite names from the import * above. 
+from ._aliases import * # noqa: F403 + +__array_api_version__ = '2022.12' + +__import__(__package__ + '.linalg') diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ede59a98791c7618d0e318230fce5c9425d2f52b Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/_aliases.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd9c6989b93baba2237c4a2ebf7ccb381f3b302f Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/_aliases.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/linalg.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ded84bcb22c2e36278ee51c9103a1d97f47dd302 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/linalg.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..94d938a40ab877fd1f039d906f5f771e0320a248 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +from ...common import _aliases +from ...common._helpers import _check_device + +from ..._internal import get_xp + +import numpy as np +from numpy import ( + # Constants + e, + inf, + nan, + pi, + newaxis, + # Dtypes + bool_ as bool, + float32, + float64, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + complex64, + complex128, + iinfo, + finfo, + can_cast, + result_type, +) + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional, Union + + from ...common._typing import Device, Dtype, Array + +import dask.array as da + +isdtype = get_xp(np)(_aliases.isdtype) +astype = _aliases.astype + +# Common aliases + +# This arange func is modified from the common one to +# not pass stop/step as keyword arguments, which will cause +# an error with dask + +# TODO: delete the xp stuff, it shouldn't be necessary +def _dask_arange( + start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + 
xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> Array: + _check_device(xp, device) + args = [start] + if stop is not None: + args.append(stop) + else: + # stop is None, so start is actually stop + # prepend the default value for start which is 0 + args.insert(0, 0) + args.append(step) + return xp.arange(*args, dtype=dtype, **kwargs) + +arange = get_xp(da)(_dask_arange) +eye = get_xp(da)(_aliases.eye) + +from functools import partial +asarray = partial(_aliases._asarray, namespace='dask.array') +asarray.__doc__ = _aliases._asarray.__doc__ + +linspace = get_xp(da)(_aliases.linspace) +eye = get_xp(da)(_aliases.eye) +UniqueAllResult = get_xp(da)(_aliases.UniqueAllResult) +UniqueCountsResult = get_xp(da)(_aliases.UniqueCountsResult) +UniqueInverseResult = get_xp(da)(_aliases.UniqueInverseResult) +unique_all = get_xp(da)(_aliases.unique_all) +unique_counts = get_xp(da)(_aliases.unique_counts) +unique_inverse = get_xp(da)(_aliases.unique_inverse) +unique_values = get_xp(da)(_aliases.unique_values) +permute_dims = get_xp(da)(_aliases.permute_dims) +std = get_xp(da)(_aliases.std) +var = get_xp(da)(_aliases.var) +empty = get_xp(da)(_aliases.empty) +empty_like = get_xp(da)(_aliases.empty_like) +full = get_xp(da)(_aliases.full) +full_like = get_xp(da)(_aliases.full_like) +ones = get_xp(da)(_aliases.ones) +ones_like = get_xp(da)(_aliases.ones_like) +zeros = get_xp(da)(_aliases.zeros) +zeros_like = get_xp(da)(_aliases.zeros_like) +reshape = get_xp(da)(_aliases.reshape) +matrix_transpose = get_xp(da)(_aliases.matrix_transpose) +vecdot = get_xp(da)(_aliases.vecdot) + +nonzero = get_xp(da)(_aliases.nonzero) +sum = get_xp(np)(_aliases.sum) +prod = get_xp(np)(_aliases.prod) +ceil = get_xp(np)(_aliases.ceil) +floor = get_xp(np)(_aliases.floor) +trunc = get_xp(np)(_aliases.trunc) +matmul = get_xp(np)(_aliases.matmul) +tensordot = get_xp(np)(_aliases.tensordot) + +from dask.array import ( + # Element wise aliases + arccos as acos, + arccosh as acosh, + arcsin as asin, + arcsinh as asinh, + arctan as atan, + arctan2 as atan2, + arctanh as atanh, + left_shift as bitwise_left_shift, + right_shift as bitwise_right_shift, + invert as bitwise_invert, + power as pow, + # Other + concatenate as concat, +) + +# exclude these from all since +_da_unsupported = ['sort', 'argsort'] + +common_aliases = [alias for alias in _aliases.__all__ if alias not in _da_unsupported] + +__all__ = common_aliases + ['asarray', 'bool', 'acos', + 'acosh', 'asin', 'asinh', 'atan', 'atan2', + 'atanh', 'bitwise_left_shift', 'bitwise_invert', + 'bitwise_right_shift', 'concat', 'pow', + 'e', 'inf', 'nan', 'pi', 'newaxis', 'float32', 'float64', 'int8', + 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', + 'complex64', 'complex128', 'iinfo', 'finfo', 'can_cast', 'result_type'] + +_all_ignore = ['get_xp', 'da', 'partial', 'common_aliases', 'np'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..7f5b2c6e2840e8a0bc95c4daa6e7231ae5b9053a --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from ...common import _linalg +from ..._internal import get_xp + +# Exports 
+from dask.array.linalg import * # noqa: F403
+from dask.array import trace, outer
+
+# These functions are in both the main and linalg namespaces
+from dask.array import matmul, tensordot
+from ._aliases import matrix_transpose, vecdot
+
+import dask.array as da
+
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from ...common._typing import Array
+    from typing import Literal
+
+# dask.array.linalg doesn't have __all__. If it is added, replace this with
+#
+# from dask.array.linalg import __all__ as linalg_all
+_n = {}
+exec('from dask.array.linalg import *', _n)
+del _n['__builtins__']
+if 'annotations' in _n:
+    del _n['annotations']
+linalg_all = list(_n)
+del _n
+
+EighResult = _linalg.EighResult
+QRResult = _linalg.QRResult
+SlogdetResult = _linalg.SlogdetResult
+SVDResult = _linalg.SVDResult
+# TODO: use the QR wrapper once dask
+# supports the mode keyword on QR
+# https://github.com/dask/dask/issues/10388
+#qr = get_xp(da)(_linalg.qr)
+def qr(x: Array, mode: Literal['reduced', 'complete'] = 'reduced',
+       **kwargs) -> QRResult:
+    if mode != "reduced":
+        raise ValueError("dask arrays only support using mode='reduced'")
+    return QRResult(*da.linalg.qr(x, **kwargs))
+cholesky = get_xp(da)(_linalg.cholesky)
+matrix_rank = get_xp(da)(_linalg.matrix_rank)
+matrix_norm = get_xp(da)(_linalg.matrix_norm)
+
+
+# Wrap svd so that full_matrices is never passed through to dask: dask does
+# not accept a full_matrices keyword, so only full_matrices=False (which
+# matches dask's default behavior) is supported.
+def svd(x: Array, full_matrices: bool = True, **kwargs) -> SVDResult:
+    if full_matrices:
+        raise ValueError("full_matrices=True is not supported by dask.")
+    return da.linalg.svd(x, coerce_signs=False, **kwargs)
+
+def svdvals(x: Array) -> Array:
+    # TODO: can't avoid computing U or V for dask
+    _, s, _ = svd(x)
+    return s
+
+vector_norm = get_xp(da)(_linalg.vector_norm)
+diagonal = get_xp(da)(_linalg.diagonal)
+
+__all__ = linalg_all + ["trace", "outer", "matmul", "tensordot",
+                        "matrix_transpose", "vecdot", "EighResult",
+                        "QRResult", "SlogdetResult", "SVDResult", "qr",
+                        "cholesky", "matrix_rank", "matrix_norm", "svdvals",
+                        "vector_norm", "diagonal"]
+
+_all_ignore = ['get_xp', 'da', 'linalg_all']
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..879087094cb3894566a8c9be9d322e415df72ce6
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py
@@ -0,0 +1,24 @@
+from numpy import * # noqa: F403
+
+# from numpy import * doesn't overwrite these builtin names
+from numpy import abs, max, min, round # noqa: F401
+
+# These imports may overwrite names from the import * above.
+from ._aliases import * # noqa: F403
+
+# Don't know why, but we have to do an absolute import to import linalg. If we
+# instead do
+#
+# from . import linalg
+#
+# It doesn't overwrite np.linalg from above. The import is generated
+# dynamically so that the library can be vendored.
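+# For instance, when vendored as scipy._lib.array_api_compat, __package__ here
+# is 'scipy._lib.array_api_compat.numpy', so the line below imports
+# 'scipy._lib.array_api_compat.numpy.linalg' rather than a hard-coded
+# top-level module name.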
+__import__(__package__ + '.linalg') + +__import__(__package__ + '.fft') + +from .linalg import matrix_transpose, vecdot # noqa: F401 + +from ..common._helpers import * # noqa: F403 + +__array_api_version__ = '2022.12' diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d199ebdb787fced7a417834f31af55a48e97374 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5628111395095fa682f9337acdb5bc9f0ada3a04 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5fdf0bf36c7fb158c2f90e10c9536670783825d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37485aeea67b178b490b11c5a4fa9a07519b58b9 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0f933f7f5d08248478a91d3488411d540f62184 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py new file mode 100644 index 
0000000000000000000000000000000000000000..1201d798864d82c77e2bb95a5e664d3f79ee59c7 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from functools import partial + +from ..common import _aliases + +from .._internal import get_xp + +asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy') +asarray.__doc__ = _aliases._asarray.__doc__ +del partial + +import numpy as np +bool = np.bool_ + +# Basic renames +acos = np.arccos +acosh = np.arccosh +asin = np.arcsin +asinh = np.arcsinh +atan = np.arctan +atan2 = np.arctan2 +atanh = np.arctanh +bitwise_left_shift = np.left_shift +bitwise_invert = np.invert +bitwise_right_shift = np.right_shift +concat = np.concatenate +pow = np.power + +arange = get_xp(np)(_aliases.arange) +empty = get_xp(np)(_aliases.empty) +empty_like = get_xp(np)(_aliases.empty_like) +eye = get_xp(np)(_aliases.eye) +full = get_xp(np)(_aliases.full) +full_like = get_xp(np)(_aliases.full_like) +linspace = get_xp(np)(_aliases.linspace) +ones = get_xp(np)(_aliases.ones) +ones_like = get_xp(np)(_aliases.ones_like) +zeros = get_xp(np)(_aliases.zeros) +zeros_like = get_xp(np)(_aliases.zeros_like) +UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult) +UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult) +UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult) +unique_all = get_xp(np)(_aliases.unique_all) +unique_counts = get_xp(np)(_aliases.unique_counts) +unique_inverse = get_xp(np)(_aliases.unique_inverse) +unique_values = get_xp(np)(_aliases.unique_values) +astype = _aliases.astype +std = get_xp(np)(_aliases.std) +var = get_xp(np)(_aliases.var) +permute_dims = get_xp(np)(_aliases.permute_dims) +reshape = get_xp(np)(_aliases.reshape) +argsort = get_xp(np)(_aliases.argsort) +sort = get_xp(np)(_aliases.sort) +nonzero = get_xp(np)(_aliases.nonzero) +sum = get_xp(np)(_aliases.sum) +prod = get_xp(np)(_aliases.prod) +ceil = get_xp(np)(_aliases.ceil) +floor = get_xp(np)(_aliases.floor) +trunc = get_xp(np)(_aliases.trunc) +matmul = get_xp(np)(_aliases.matmul) +matrix_transpose = get_xp(np)(_aliases.matrix_transpose) +tensordot = get_xp(np)(_aliases.tensordot) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. 
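+# (For reference, the rename aliases above resolve to the familiar NumPy
+# callables, e.g. in a hypothetical session:
+#
+#   >>> from scipy._lib.array_api_compat import numpy as xp
+#   >>> import numpy as np
+#   >>> xp.acos is np.arccos and xp.concat is np.concatenate
+#   True
+# )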
+if hasattr(np, 'vecdot'): + vecdot = np.vecdot +else: + vecdot = get_xp(np)(_aliases.vecdot) +if hasattr(np, 'isdtype'): + isdtype = np.isdtype +else: + isdtype = get_xp(np)(_aliases.isdtype) + +__all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos', + 'acosh', 'asin', 'asinh', 'atan', 'atan2', + 'atanh', 'bitwise_left_shift', 'bitwise_invert', + 'bitwise_right_shift', 'concat', 'pow'] + +_all_ignore = ['np', 'get_xp'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ebb5abb987572be625ee864a37e61126d36d8b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +__all__ = [ + "ndarray", + "Device", + "Dtype", +] + +import sys +from typing import ( + Literal, + Union, + TYPE_CHECKING, +) + +from numpy import ( + ndarray, + dtype, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, +) + +Device = Literal["cpu"] +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/fft.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..286675946e0fbb0aa18105d25db08ebbbd2e4d0c --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/fft.py @@ -0,0 +1,29 @@ +from numpy.fft import * # noqa: F403 +from numpy.fft import __all__ as fft_all + +from ..common import _fft +from .._internal import get_xp + +import numpy as np + +fft = get_xp(np)(_fft.fft) +ifft = get_xp(np)(_fft.ifft) +fftn = get_xp(np)(_fft.fftn) +ifftn = get_xp(np)(_fft.ifftn) +rfft = get_xp(np)(_fft.rfft) +irfft = get_xp(np)(_fft.irfft) +rfftn = get_xp(np)(_fft.rfftn) +irfftn = get_xp(np)(_fft.irfftn) +hfft = get_xp(np)(_fft.hfft) +ihfft = get_xp(np)(_fft.ihfft) +fftfreq = get_xp(np)(_fft.fftfreq) +rfftfreq = get_xp(np)(_fft.rfftfreq) +fftshift = get_xp(np)(_fft.fftshift) +ifftshift = get_xp(np)(_fft.ifftshift) + +__all__ = fft_all + _fft.__all__ + +del get_xp +del np +del fft_all +del _fft diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..8f01593bd0ae619b3bea471980b4eeabfc29f319 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py @@ -0,0 +1,90 @@ +from numpy.linalg import * # noqa: F403 +from numpy.linalg import __all__ as linalg_all +import numpy as _np + +from ..common import _linalg +from .._internal import get_xp + +# These functions are in both the main and linalg namespaces +from ._aliases import matmul, matrix_transpose, tensordot, vecdot 
# noqa: F401 + +import numpy as np + +cross = get_xp(np)(_linalg.cross) +outer = get_xp(np)(_linalg.outer) +EighResult = _linalg.EighResult +QRResult = _linalg.QRResult +SlogdetResult = _linalg.SlogdetResult +SVDResult = _linalg.SVDResult +eigh = get_xp(np)(_linalg.eigh) +qr = get_xp(np)(_linalg.qr) +slogdet = get_xp(np)(_linalg.slogdet) +svd = get_xp(np)(_linalg.svd) +cholesky = get_xp(np)(_linalg.cholesky) +matrix_rank = get_xp(np)(_linalg.matrix_rank) +pinv = get_xp(np)(_linalg.pinv) +matrix_norm = get_xp(np)(_linalg.matrix_norm) +svdvals = get_xp(np)(_linalg.svdvals) +diagonal = get_xp(np)(_linalg.diagonal) +trace = get_xp(np)(_linalg.trace) + +# Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a +# vector when it is exactly 1-dimensional. All other cases treat x2 as a stack +# of matrices. The np.linalg.solve behavior of allowing stacks of both +# matrices and vectors is ambiguous c.f. +# https://github.com/numpy/numpy/issues/15349 and +# https://github.com/data-apis/array-api/issues/285. + +# To workaround this, the below is the code from np.linalg.solve except +# only calling solve1 in the exactly 1D case. + +# This code is here instead of in common because it is numpy specific. Also +# note that CuPy's solve() does not currently support broadcasting (see +# https://github.com/cupy/cupy/blob/main/cupy/cublas.py#L43). +def solve(x1: _np.ndarray, x2: _np.ndarray, /) -> _np.ndarray: + try: + from numpy.linalg._linalg import ( + _makearray, _assert_stacked_2d, _assert_stacked_square, + _commonType, isComplexType, _raise_linalgerror_singular + ) + except ImportError: + from numpy.linalg.linalg import ( + _makearray, _assert_stacked_2d, _assert_stacked_square, + _commonType, isComplexType, _raise_linalgerror_singular + ) + from numpy.linalg import _umath_linalg + + x1, _ = _makearray(x1) + _assert_stacked_2d(x1) + _assert_stacked_square(x1) + x2, wrap = _makearray(x2) + t, result_t = _commonType(x1, x2) + + # This part is different from np.linalg.solve + if x2.ndim == 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + # This does nothing currently but is left in because it will be relevant + # when complex dtype support is added to the spec in 2022. + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with _np.errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(x1, x2, signature=signature) + + return wrap(r.astype(result_t, copy=False)) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. 
+if hasattr(np.linalg, 'vector_norm'): + vector_norm = np.linalg.vector_norm +else: + vector_norm = get_xp(np)(_linalg.vector_norm) + +__all__ = linalg_all + _linalg.__all__ + ['solve'] + +del get_xp +del np +del linalg_all +del _linalg diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..172f52792c231f6ce90145fa0dff7a687dd57089 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py @@ -0,0 +1,24 @@ +from torch import * # noqa: F403 + +# Several names are not included in the above import * +import torch +for n in dir(torch): + if (n.startswith('_') + or n.endswith('_') + or 'cuda' in n + or 'cpu' in n + or 'backward' in n): + continue + exec(n + ' = torch.' + n) + +# These imports may overwrite names from the import * above. +from ._aliases import * # noqa: F403 + +# See the comment in the numpy __init__.py +__import__(__package__ + '.linalg') + +__import__(__package__ + '.fft') + +from ..common._helpers import * # noqa: F403 + +__array_api_version__ = '2022.12' diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f2c839a0f91fab0be4d629a272e756d84952be9 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b42e6e86d1cb6151bc02efac9af5965451c2a44a Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/fft.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..019ad6215918fb4bb7ffb80b44b3fb6c1d840495 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/fft.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1db071599799e16dd18a9c84d55ed7237554fb86 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..fb53e0eeb072bda5d6fdc806442c4ed5437895b8 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py @@ -0,0 +1,718 @@ +from __future__ import annotations + +from functools import wraps as _wraps +from builtins import all as _builtin_all, any as _builtin_any + +from ..common._aliases import (matrix_transpose as _aliases_matrix_transpose, + vecdot as _aliases_vecdot) +from .._internal import get_xp + +import torch + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import List, Optional, Sequence, Tuple, Union + from ..common._typing import Device + from torch import dtype as Dtype + + array = torch.Tensor + +_int_dtypes = { + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, +} + +_array_api_dtypes = { + torch.bool, + *_int_dtypes, + torch.float32, + torch.float64, + torch.complex64, + torch.complex128, +} + +_promotion_table = { + # bool + (torch.bool, torch.bool): torch.bool, + # ints + (torch.int8, torch.int8): torch.int8, + (torch.int8, torch.int16): torch.int16, + (torch.int8, torch.int32): torch.int32, + (torch.int8, torch.int64): torch.int64, + (torch.int16, torch.int8): torch.int16, + (torch.int16, torch.int16): torch.int16, + (torch.int16, torch.int32): torch.int32, + (torch.int16, torch.int64): torch.int64, + (torch.int32, torch.int8): torch.int32, + (torch.int32, torch.int16): torch.int32, + (torch.int32, torch.int32): torch.int32, + (torch.int32, torch.int64): torch.int64, + (torch.int64, torch.int8): torch.int64, + (torch.int64, torch.int16): torch.int64, + (torch.int64, torch.int32): torch.int64, + (torch.int64, torch.int64): torch.int64, + # uints + (torch.uint8, torch.uint8): torch.uint8, + # ints and uints (mixed sign) + (torch.int8, torch.uint8): torch.int16, + (torch.int16, torch.uint8): torch.int16, + (torch.int32, torch.uint8): torch.int32, + (torch.int64, torch.uint8): torch.int64, + (torch.uint8, torch.int8): torch.int16, + (torch.uint8, torch.int16): torch.int16, + (torch.uint8, torch.int32): torch.int32, + (torch.uint8, torch.int64): torch.int64, + # floats + (torch.float32, torch.float32): torch.float32, + (torch.float32, torch.float64): torch.float64, + (torch.float64, torch.float32): torch.float64, + (torch.float64, torch.float64): torch.float64, + # complexes + (torch.complex64, torch.complex64): torch.complex64, + (torch.complex64, torch.complex128): torch.complex128, + (torch.complex128, torch.complex64): torch.complex128, + (torch.complex128, torch.complex128): torch.complex128, + # Mixed float and complex + (torch.float32, torch.complex64): torch.complex64, + (torch.float32, torch.complex128): torch.complex128, + (torch.float64, torch.complex64): torch.complex128, + (torch.float64, torch.complex128): torch.complex128, +} + + +def _two_arg(f): + @_wraps(f) + def _f(x1, x2, /, **kwargs): + x1, x2 = _fix_promotion(x1, x2) + return f(x1, x2, **kwargs) + if 
_f.__doc__ is None: + _f.__doc__ = f"""\ +Array API compatibility wrapper for torch.{f.__name__}. + +See the corresponding PyTorch documentation and/or the array API specification +for more details. + +""" + return _f + +def _fix_promotion(x1, x2, only_scalar=True): + if not isinstance(x1, torch.Tensor) or not isinstance(x2, torch.Tensor): + return x1, x2 + if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes: + return x1, x2 + # If an argument is 0-D pytorch downcasts the other argument + if not only_scalar or x1.shape == (): + dtype = result_type(x1, x2) + x2 = x2.to(dtype) + if not only_scalar or x2.shape == (): + dtype = result_type(x1, x2) + x1 = x1.to(dtype) + return x1, x2 + +def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: + if len(arrays_and_dtypes) == 0: + raise TypeError("At least one array or dtype must be provided") + if len(arrays_and_dtypes) == 1: + x = arrays_and_dtypes[0] + if isinstance(x, torch.dtype): + return x + return x.dtype + if len(arrays_and_dtypes) > 2: + return result_type(arrays_and_dtypes[0], result_type(*arrays_and_dtypes[1:])) + + x, y = arrays_and_dtypes + xdt = x.dtype if not isinstance(x, torch.dtype) else x + ydt = y.dtype if not isinstance(y, torch.dtype) else y + + if (xdt, ydt) in _promotion_table: + return _promotion_table[xdt, ydt] + + # This doesn't result_type(dtype, dtype) for non-array API dtypes + # because torch.result_type only accepts tensors. This does however, allow + # cross-kind promotion. + x = torch.tensor([], dtype=x) if isinstance(x, torch.dtype) else x + y = torch.tensor([], dtype=y) if isinstance(y, torch.dtype) else y + return torch.result_type(x, y) + +def can_cast(from_: Union[Dtype, array], to: Dtype, /) -> bool: + if not isinstance(from_, torch.dtype): + from_ = from_.dtype + return torch.can_cast(from_, to) + +# Basic renames +bitwise_invert = torch.bitwise_not +newaxis = None + +# Two-arg elementwise functions +# These require a wrapper to do the correct type promotion on 0-D tensors +add = _two_arg(torch.add) +atan2 = _two_arg(torch.atan2) +bitwise_and = _two_arg(torch.bitwise_and) +bitwise_left_shift = _two_arg(torch.bitwise_left_shift) +bitwise_or = _two_arg(torch.bitwise_or) +bitwise_right_shift = _two_arg(torch.bitwise_right_shift) +bitwise_xor = _two_arg(torch.bitwise_xor) +divide = _two_arg(torch.divide) +# Also a rename. torch.equal does not broadcast +equal = _two_arg(torch.eq) +floor_divide = _two_arg(torch.floor_divide) +greater = _two_arg(torch.greater) +greater_equal = _two_arg(torch.greater_equal) +less = _two_arg(torch.less) +less_equal = _two_arg(torch.less_equal) +logaddexp = _two_arg(torch.logaddexp) +# logical functions are not included here because they only accept bool in the +# spec, so type promotion is irrelevant. +multiply = _two_arg(torch.multiply) +not_equal = _two_arg(torch.not_equal) +pow = _two_arg(torch.pow) +remainder = _two_arg(torch.remainder) +subtract = _two_arg(torch.subtract) + +# These wrappers are mostly based on the fact that pytorch uses 'dim' instead +# of 'axis'. 
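+
+# A minimal doctest-style illustration of the 0-D promotion fix performed by
+# the two-arg wrappers above (the dtypes are one example; any mixed pair of
+# array API dtypes behaves analogously):
+#
+# >>> x = torch.tensor([1, 2], dtype=torch.int8)
+# >>> y = torch.tensor(3, dtype=torch.int64)    # 0-D operand
+# >>> torch.add(x, y).dtype                     # the 0-D dtype does not promote
+# torch.int8
+# >>> add(x, y).dtype                           # wrapped: spec-compliant promotion
+# torch.int64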
+
+# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745
+def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:
+    # https://github.com/pytorch/pytorch/issues/29137
+    if axis == ():
+        return torch.clone(x)
+    return torch.amax(x, axis, keepdims=keepdims)
+
+def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:
+    # https://github.com/pytorch/pytorch/issues/29137
+    if axis == ():
+        return torch.clone(x)
+    return torch.amin(x, axis, keepdims=keepdims)
+
+# torch.sort also returns a tuple
+# https://github.com/pytorch/pytorch/issues/70921
+def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> array:
+    return torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs).values
+
+def _normalize_axes(axis, ndim):
+    axes = []
+    if ndim == 0 and axis:
+        # Better error message in this case
+        raise IndexError(f"Dimension out of range: {axis[0]}")
+    lower, upper = -ndim, ndim - 1
+    for a in axis:
+        if a < lower or a > upper:
+            # Match torch error message (e.g., from sum())
+            raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a})")
+        if a < 0:
+            a = a + ndim
+        if a in axes:
+            # Use IndexError instead of RuntimeError, and "axis" instead of "dim"
+            raise IndexError(f"Axis {a} appears multiple times in the list of axes")
+        axes.append(a)
+    return sorted(axes)
+
+def _axis_none_keepdims(x, ndim, keepdims):
+    # Apply keepdims when axis=None
+    # (https://github.com/pytorch/pytorch/issues/71209)
+    # Note that this is only valid for the axis=None case.
+    if keepdims:
+        for i in range(ndim):
+            x = torch.unsqueeze(x, 0)
+    return x
+
+def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs):
+    # Some reductions don't support multiple axes
+    # (https://github.com/pytorch/pytorch/issues/56586).
+    axes = _normalize_axes(axis, x.ndim)
+    for a in reversed(axes):
+        x = torch.movedim(x, a, -1)
+    x = torch.flatten(x, -len(axes))
+
+    out = f(x, -1, **kwargs)
+
+    if keepdims:
+        for a in axes:
+            out = torch.unsqueeze(out, a)
+    return out
+
+def prod(x: array,
+         /,
+         *,
+         axis: Optional[Union[int, Tuple[int, ...]]] = None,
+         dtype: Optional[Dtype] = None,
+         keepdims: bool = False,
+         **kwargs) -> array:
+    x = torch.asarray(x)
+    ndim = x.ndim
+
+    # https://github.com/pytorch/pytorch/issues/29137. Separate from the logic
+    # below because it still needs to upcast.
+    if axis == ():
+        if dtype is None:
+            # We can't upcast uint8 according to the spec because there is no
+            # torch.uint64, so at least upcast to int64 which is what sum does
+            # when axis=None.
+            if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]:
+                return x.to(torch.int64)
+            return x.clone()
+        return x.to(dtype)
+
+    # torch.prod doesn't support multiple axes
+    # (https://github.com/pytorch/pytorch/issues/56586).
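+    # For example, with x.ndim == 3 and axis == (0, 2), the helper moves
+    # dims 0 and 2 to the end, flattens them into one trailing dim, reduces
+    # over it, and, if keepdims is set, unsqueezes size-1 dims back at the
+    # reduced positions -- a doctest-style sketch:
+    #
+    # >>> prod(torch.arange(1, 7).reshape(2, 3, 1), axis=(0, 2)).shape
+    # torch.Size([3])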
+ if isinstance(axis, tuple): + return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.prod(x, dtype=dtype, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res + + return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + + +def sum(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. + # Make sure it upcasts. + if axis == (): + if dtype is None: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. + if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: + return x.to(torch.int64) + return x.clone() + return x.to(dtype) + + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.sum(x, dtype=dtype, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res + + return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def any(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + # torch.any doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). + if isinstance(axis, tuple): + res = _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.any(x, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.any doesn't return bool for uint8 + return torch.any(x, axis, keepdims=keepdims).to(torch.bool) + +def all(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + # torch.all doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
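+    # (Same multi-axis emulation as in prod() and any() above; the result is
+    # cast back to bool because torch.all returns uint8 for uint8 input.)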
+ if isinstance(axis, tuple): + res = _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.all(x, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.all doesn't return bool for uint8 + return torch.all(x, axis, keepdims=keepdims).to(torch.bool) + +def mean(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.mean(x, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.mean(x, axis, keepdims=keepdims, **kwargs) + +def std(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. + + if isinstance(correction, float): + _correction = int(correction) + if correction != _correction: + raise NotImplementedError("float correction in torch std() is not yet supported") + else: + _correction = correction + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.std(x, tuple(range(x.ndim)), correction=_correction, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.std(x, axis, correction=_correction, keepdims=keepdims, **kwargs) + +def var(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. + + # if isinstance(correction, float): + # correction = int(correction) + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs) + +# torch.concat doesn't support dim=None +# https://github.com/pytorch/pytorch/issues/70925 +def concat(arrays: Union[Tuple[array, ...], List[array]], + /, + *, + axis: Optional[int] = 0, + **kwargs) -> array: + if axis is None: + arrays = tuple(ar.flatten() for ar in arrays) + axis = 0 + return torch.concat(arrays, axis, **kwargs) + +# torch.squeeze only accepts int dim and doesn't require it +# https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was +# added at https://github.com/pytorch/pytorch/pull/89017. 
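+# A doctest-style sketch of the intended semantics (axis is required here,
+# unlike np.squeeze, and squeezing a non-singleton dimension is an error):
+#
+# >>> squeeze(torch.zeros(1, 3, 1), axis=(0, 2)).shape
+# torch.Size([3])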
+def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: + if isinstance(axis, int): + axis = (axis,) + for a in axis: + if x.shape[a] != 1: + raise ValueError("squeezed dimensions must be equal to 1") + axes = _normalize_axes(axis, x.ndim) + # Remove this once pytorch 1.14 is released with the above PR #89017. + sequence = [a - i for i, a in enumerate(axes)] + for a in sequence: + x = torch.squeeze(x, a) + return x + +# torch.broadcast_to uses size instead of shape +def broadcast_to(x: array, /, shape: Tuple[int, ...], **kwargs) -> array: + return torch.broadcast_to(x, shape, **kwargs) + +# torch.permute uses dims instead of axes +def permute_dims(x: array, /, axes: Tuple[int, ...]) -> array: + return torch.permute(x, axes) + +# The axis parameter doesn't work for flip() and roll() +# https://github.com/pytorch/pytorch/issues/71210. Also torch.flip() doesn't +# accept axis=None +def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: + if axis is None: + axis = tuple(range(x.ndim)) + # torch.flip doesn't accept dim as an int but the method does + # https://github.com/pytorch/pytorch/issues/18095 + return x.flip(axis, **kwargs) + +def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: + return torch.roll(x, shift, axis, **kwargs) + +def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]: + if x.ndim == 0: + raise ValueError("nonzero() does not support zero-dimensional arrays") + return torch.nonzero(x, as_tuple=True, **kwargs) + +def where(condition: array, x1: array, x2: array, /) -> array: + x1, x2 = _fix_promotion(x1, x2) + return torch.where(condition, x1, x2) + +# torch.reshape doesn't have the copy keyword +def reshape(x: array, + /, + shape: Tuple[int, ...], + copy: Optional[bool] = None, + **kwargs) -> array: + if copy is not None: + raise NotImplementedError("torch.reshape doesn't yet support the copy keyword") + return torch.reshape(x, shape, **kwargs) + +# torch.arange doesn't support returning empty arrays +# (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some +# keyword argument combinations +# (https://github.com/pytorch/pytorch/issues/70914) +def arange(start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if stop is None: + start, stop = 0, start + if step > 0 and stop <= start or step < 0 and stop >= start: + if dtype is None: + if _builtin_all(isinstance(i, int) for i in [start, stop, step]): + dtype = torch.int64 + else: + dtype = torch.float32 + return torch.empty(0, dtype=dtype, device=device, **kwargs) + return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs) + +# torch.eye does not accept None as a default for the second argument and +# doesn't support off-diagonals (https://github.com/pytorch/pytorch/issues/70910) +def eye(n_rows: int, + n_cols: Optional[int] = None, + /, + *, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if n_cols is None: + n_cols = n_rows + z = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs) + if abs(k) <= n_rows + n_cols: + z.diagonal(k).fill_(1) + return z + +# torch.linspace doesn't have the endpoint parameter +def linspace(start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + dtype: Optional[Dtype] = None, + device: 
Optional[Device] = None, + endpoint: bool = True, + **kwargs) -> array: + if not endpoint: + return torch.linspace(start, stop, num+1, dtype=dtype, device=device, **kwargs)[:-1] + return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs) + +# torch.full does not accept an int size +# https://github.com/pytorch/pytorch/issues/70906 +def full(shape: Union[int, Tuple[int, ...]], + fill_value: Union[bool, int, float, complex], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if isinstance(shape, int): + shape = (shape,) + + return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) + +# ones, zeros, and empty do not accept shape as a keyword argument +def ones(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.ones(shape, dtype=dtype, device=device, **kwargs) + +def zeros(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.zeros(shape, dtype=dtype, device=device, **kwargs) + +def empty(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.empty(shape, dtype=dtype, device=device, **kwargs) + +# tril and triu do not call the keyword argument k + +def tril(x: array, /, *, k: int = 0) -> array: + return torch.tril(x, k) + +def triu(x: array, /, *, k: int = 0) -> array: + return torch.triu(x, k) + +# Functions that aren't in torch https://github.com/pytorch/pytorch/issues/58742 +def expand_dims(x: array, /, *, axis: int = 0) -> array: + return torch.unsqueeze(x, axis) + +def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: + return x.to(dtype, copy=copy) + +def broadcast_arrays(*arrays: array) -> List[array]: + shape = torch.broadcast_shapes(*[a.shape for a in arrays]) + return [torch.broadcast_to(a, shape) for a in arrays] + +# Note that these named tuples aren't actually part of the standard namespace, +# but I don't see any issue with exporting the names here regardless. +from ..common._aliases import (UniqueAllResult, UniqueCountsResult, + UniqueInverseResult) + +# https://github.com/pytorch/pytorch/issues/70920 +def unique_all(x: array) -> UniqueAllResult: + # torch.unique doesn't support returning indices. + # https://github.com/pytorch/pytorch/issues/36748. The workaround + # suggested in that issue doesn't actually function correctly (it relies + # on non-deterministic behavior of scatter()). + raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)") + + # values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) + # # torch.unique incorrectly gives a 0 count for nan values. + # # https://github.com/pytorch/pytorch/issues/94106 + # counts[torch.isnan(values)] = 1 + # return UniqueAllResult(values, indices, inverse_indices, counts) + +def unique_counts(x: array) -> UniqueCountsResult: + values, counts = torch.unique(x, return_counts=True) + + # torch.unique incorrectly gives a 0 count for nan values. 
+ # https://github.com/pytorch/pytorch/issues/94106 + counts[torch.isnan(values)] = 1 + return UniqueCountsResult(values, counts) + +def unique_inverse(x: array) -> UniqueInverseResult: + values, inverse = torch.unique(x, return_inverse=True) + return UniqueInverseResult(values, inverse) + +def unique_values(x: array) -> array: + return torch.unique(x) + +def matmul(x1: array, x2: array, /, **kwargs) -> array: + # torch.matmul doesn't type promote (but differently from _fix_promotion) + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.matmul(x1, x2, **kwargs) + +matrix_transpose = get_xp(torch)(_aliases_matrix_transpose) +_vecdot = get_xp(torch)(_aliases_vecdot) + +def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return _vecdot(x1, x2, axis=axis) + +# torch.tensordot uses dims instead of axes +def tensordot(x1: array, x2: array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, **kwargs) -> array: + # Note: torch.tensordot fails with integer dtypes when there is only 1 + # element in the axis (https://github.com/pytorch/pytorch/issues/84530). + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.tensordot(x1, x2, dims=axes, **kwargs) + + +def isdtype( + dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], + *, _tuple=True, # Disallow nested tuples +) -> bool: + """ + Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. + + Note that outside of this function, this compat library does not yet fully + support complex numbers. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + for more details + """ + if isinstance(kind, tuple) and _tuple: + return _builtin_any(isdtype(dtype, k, _tuple=False) for k in kind) + elif isinstance(kind, str): + if kind == 'bool': + return dtype == torch.bool + elif kind == 'signed integer': + return dtype in _int_dtypes and dtype.is_signed + elif kind == 'unsigned integer': + return dtype in _int_dtypes and not dtype.is_signed + elif kind == 'integral': + return dtype in _int_dtypes + elif kind == 'real floating': + return dtype.is_floating_point + elif kind == 'complex floating': + return dtype.is_complex + elif kind == 'numeric': + return isdtype(dtype, ('integral', 'real floating', 'complex floating')) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + return dtype == kind + +def take(x: array, indices: array, /, *, axis: Optional[int] = None, **kwargs) -> array: + if axis is None: + if x.ndim != 1: + raise ValueError("axis must be specified when ndim > 1") + axis = 0 + return torch.index_select(x, axis, indices, **kwargs) + +__all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', + 'newaxis', 'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', + 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'divide', + 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', + 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', + 'remainder', 'subtract', 'max', 'min', 'sort', 'prod', 'sum', + 'any', 'all', 'mean', 'std', 'var', 'concat', 'squeeze', + 'broadcast_to', 'flip', 'roll', 'nonzero', 'where', 'reshape', + 'arange', 'eye', 'linspace', 'full', 'ones', 'zeros', 'empty', + 'tril', 'triu', 'expand_dims', 'astype', 'broadcast_arrays', + 'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', + 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', + 'matmul', 
'matrix_transpose', 'vecdot', 'tensordot', 'isdtype', + 'take'] + +_all_ignore = ['torch', 'get_xp'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/fft.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..3c9117ee57d3534e3e72329d740632c02e936200 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/fft.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + import torch + array = torch.Tensor + from typing import Union, Sequence, Literal + +from torch.fft import * # noqa: F403 +import torch.fft + +# Several torch fft functions do not map axes to dim + +def fftn( + x: array, + /, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", + **kwargs, +) -> array: + return torch.fft.fftn(x, s=s, dim=axes, norm=norm, **kwargs) + +def ifftn( + x: array, + /, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", + **kwargs, +) -> array: + return torch.fft.ifftn(x, s=s, dim=axes, norm=norm, **kwargs) + +def rfftn( + x: array, + /, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", + **kwargs, +) -> array: + return torch.fft.rfftn(x, s=s, dim=axes, norm=norm, **kwargs) + +def irfftn( + x: array, + /, + *, + s: Sequence[int] = None, + axes: Sequence[int] = None, + norm: Literal["backward", "ortho", "forward"] = "backward", + **kwargs, +) -> array: + return torch.fft.irfftn(x, s=s, dim=axes, norm=norm, **kwargs) + +def fftshift( + x: array, + /, + *, + axes: Union[int, Sequence[int]] = None, + **kwargs, +) -> array: + return torch.fft.fftshift(x, dim=axes, **kwargs) + +def ifftshift( + x: array, + /, + *, + axes: Union[int, Sequence[int]] = None, + **kwargs, +) -> array: + return torch.fft.ifftshift(x, dim=axes, **kwargs) + + +__all__ = torch.fft.__all__ + [ + "fftn", + "ifftn", + "rfftn", + "irfftn", + "fftshift", + "ifftshift", +] + +_all_ignore = ['torch'] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..7e7e241521caf50564a594d84a0b4f7cd0b08c20 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + import torch + array = torch.Tensor + from torch import dtype as Dtype + from typing import Optional, Union, Tuple, Literal + inf = float('inf') + +from ._aliases import _fix_promotion, sum + +from torch.linalg import * # noqa: F403 + +# torch.linalg doesn't define __all__ +# from torch.linalg import __all__ as linalg_all +from torch import linalg as torch_linalg +linalg_all = [i for i in dir(torch_linalg) if not i.startswith('_')] + +# outer is implemented in torch but aren't in the linalg namespace +from torch import outer +# These functions are in both the main and linalg namespaces +from 
._aliases import matmul, matrix_transpose, tensordot + +# Note: torch.linalg.cross does not default to axis=-1 (it defaults to the +# first axis with size 3), see https://github.com/pytorch/pytorch/issues/58743 + +# torch.cross also does not support broadcasting when it would add new +# dimensions https://github.com/pytorch/pytorch/issues/39656 +def cross(x1: array, x2: array, /, *, axis: int = -1) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + if not (-min(x1.ndim, x2.ndim) <= axis < max(x1.ndim, x2.ndim)): + raise ValueError(f"axis {axis} out of bounds for cross product of arrays with shapes {x1.shape} and {x2.shape}") + if not (x1.shape[axis] == x2.shape[axis] == 3): + raise ValueError(f"cross product axis must have size 3, got {x1.shape[axis]} and {x2.shape[axis]}") + x1, x2 = torch.broadcast_tensors(x1, x2) + return torch_linalg.cross(x1, x2, dim=axis) + +def vecdot(x1: array, x2: array, /, *, axis: int = -1, **kwargs) -> array: + from ._aliases import isdtype + + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + + # torch.linalg.vecdot incorrectly allows broadcasting along the contracted dimension + if x1.shape[axis] != x2.shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + # torch.linalg.vecdot doesn't support integer dtypes + if isdtype(x1.dtype, 'integral') or isdtype(x2.dtype, 'integral'): + if kwargs: + raise RuntimeError("vecdot kwargs not supported for integral dtypes") + + x1_ = torch.moveaxis(x1, axis, -1) + x2_ = torch.moveaxis(x2, axis, -1) + x1_, x2_ = torch.broadcast_tensors(x1_, x2_) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + return torch.linalg.vecdot(x1, x2, dim=axis, **kwargs) + +def solve(x1: array, x2: array, /, **kwargs) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.linalg.solve(x1, x2, **kwargs) + +# torch.trace doesn't support the offset argument and doesn't support stacking +def trace(x: array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> array: + # Use our wrapped sum to make sure it does upcasting correctly + return sum(torch.diagonal(x, offset=offset, dim1=-2, dim2=-1), axis=-1, dtype=dtype) + +def vector_norm( + x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + ord: Union[int, float, Literal[inf, -inf]] = 2, + **kwargs, +) -> array: + # torch.vector_norm incorrectly treats axis=() the same as axis=None + if axis == (): + keepdims = True + return torch.linalg.vector_norm(x, ord=ord, axis=axis, keepdim=keepdims, **kwargs) + +__all__ = linalg_all + ['outer', 'matmul', 'matrix_transpose', 'tensordot', + 'cross', 'vecdot', 'solve', 'trace', 'vector_norm'] + +_all_ignore = ['torch_linalg', 'sum'] + +del linalg_all diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc8a6a77e2ba08138ecfe9c65b3a1d5d3ba5f6b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py @@ -0,0 +1,20 @@ +from .main import minimize +from .utils import show_versions + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Final release markers: +# X.Y.0 # For first release after an increment in Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release 
markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev'. +__version__ = "1.1.1" + +__all__ = ["minimize", "show_versions"] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dc30124451bb327b696c213da46e5e4ea7e25b4 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49209f8af1fa5a712caea187fdbde4f1038ac1a9 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7e54e2e8a38fe3c8638d752b56ec8c8465974ff Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11c04cce80dec1eca49a7380aff4373e06ff371e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87759849844eb2f67096a8ca168bdb39007559cd Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeaa1cc0e488615abe09e337ecdf8e4c8c745260 Binary files /dev/null and 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f4b3cc5b8b25d72dca03a6936587330ef8d0bf --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py @@ -0,0 +1,1240 @@ +import warnings + +import numpy as np +from scipy.optimize import lsq_linear + +from .models import Models, Quadratic +from .settings import Options, Constants +from .subsolvers import ( + cauchy_geometry, + spider_geometry, + normal_byrd_omojokun, + tangential_byrd_omojokun, + constrained_tangential_byrd_omojokun, +) +from .subsolvers.optim import qr_tangential_byrd_omojokun +from .utils import get_arrays_tol + + +TINY = np.finfo(float).tiny +EPS = np.finfo(float).eps + + +class TrustRegion: + """ + Trust-region framework. + """ + + def __init__(self, pb, options, constants): + """ + Initialize the trust-region framework. + + Parameters + ---------- + pb : `cobyqa.problem.Problem` + Problem to solve. + options : dict + Options of the solver. + constants : dict + Constants of the solver. + + Raises + ------ + `cobyqa.utils.MaxEvalError` + If the maximum number of evaluations is reached. + `cobyqa.utils.TargetSuccess` + If a nearly feasible point has been found with an objective + function value below the target. + `cobyqa.utils.FeasibleSuccess` + If a feasible point has been found for a feasibility problem. + `numpy.linalg.LinAlgError` + If the initial interpolation system is ill-defined. + """ + # Initialize the models. + self._pb = pb + self._models = Models(self._pb, options) + self._constants = constants + + # Set the initial penalty parameter. + self._penalty = 0.0 + + # Set the index of the best interpolation point. + self._best_index = 0 + self.set_best_index() + + # Set the initial Lagrange multipliers. + self._lm_linear_ub = np.zeros(self.m_linear_ub) + self._lm_linear_eq = np.zeros(self.m_linear_eq) + self._lm_nonlinear_ub = np.zeros(self.m_nonlinear_ub) + self._lm_nonlinear_eq = np.zeros(self.m_nonlinear_eq) + self.set_multipliers(self.x_best) + + # Set the initial trust-region radius and the resolution. + self._resolution = options[Options.RHOBEG] + self._radius = self.resolution + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self._pb.n + + @property + def m_linear_ub(self): + """ + Number of linear inequality constraints. + + Returns + ------- + int + Number of linear inequality constraints. + """ + return self._pb.m_linear_ub + + @property + def m_linear_eq(self): + """ + Number of linear equality constraints. + + Returns + ------- + int + Number of linear equality constraints. + """ + return self._pb.m_linear_eq + + @property + def m_nonlinear_ub(self): + """ + Number of nonlinear inequality constraints. + + Returns + ------- + int + Number of nonlinear inequality constraints. + """ + return self._pb.m_nonlinear_ub + + @property + def m_nonlinear_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + """ + return self._pb.m_nonlinear_eq + + @property + def radius(self): + """ + Trust-region radius. 
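+
+        (The setter below never lets the radius drop meaningfully below the
+        resolution: once it falls within a small factor of the resolution,
+        it is snapped back to the resolution exactly.)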
+ + Returns + ------- + float + Trust-region radius. + """ + return self._radius + + @radius.setter + def radius(self, radius): + """ + Set the trust-region radius. + + Parameters + ---------- + radius : float + New trust-region radius. + """ + self._radius = radius + if ( + self.radius + <= self._constants[Constants.DECREASE_RADIUS_THRESHOLD] + * self.resolution + ): + self._radius = self.resolution + + @property + def resolution(self): + """ + Resolution of the trust-region framework. + + The resolution is a lower bound on the trust-region radius. + + Returns + ------- + float + Resolution of the trust-region framework. + """ + return self._resolution + + @resolution.setter + def resolution(self, resolution): + """ + Set the resolution of the trust-region framework. + + Parameters + ---------- + resolution : float + New resolution of the trust-region framework. + """ + self._resolution = resolution + + @property + def penalty(self): + """ + Penalty parameter. + + Returns + ------- + float + Penalty parameter. + """ + return self._penalty + + @property + def models(self): + """ + Models of the objective function and constraints. + + Returns + ------- + `cobyqa.models.Models` + Models of the objective function and constraints. + """ + return self._models + + @property + def best_index(self): + """ + Index of the best interpolation point. + + Returns + ------- + int + Index of the best interpolation point. + """ + return self._best_index + + @property + def x_best(self): + """ + Best interpolation point. + + Its value is interpreted as relative to the origin, not the base point. + + Returns + ------- + `numpy.ndarray` + Best interpolation point. + """ + return self.models.interpolation.point(self.best_index) + + @property + def fun_best(self): + """ + Value of the objective function at `x_best`. + + Returns + ------- + float + Value of the objective function at `x_best`. + """ + return self.models.fun_val[self.best_index] + + @property + def cub_best(self): + """ + Values of the nonlinear inequality constraints at `x_best`. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Values of the nonlinear inequality constraints at `x_best`. + """ + return self.models.cub_val[self.best_index, :] + + @property + def ceq_best(self): + """ + Values of the nonlinear equality constraints at `x_best`. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_eq,) + Values of the nonlinear equality constraints at `x_best`. + """ + return self.models.ceq_val[self.best_index, :] + + def lag_model(self, x): + """ + Evaluate the Lagrangian model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the Lagrangian model is evaluated. + + Returns + ------- + float + Value of the Lagrangian model at `x`. + """ + return ( + self.models.fun(x) + + self._lm_linear_ub + @ (self._pb.linear.a_ub @ x - self._pb.linear.b_ub) + + self._lm_linear_eq + @ (self._pb.linear.a_eq @ x - self._pb.linear.b_eq) + + self._lm_nonlinear_ub @ self.models.cub(x) + + self._lm_nonlinear_eq @ self.models.ceq(x) + ) + + def lag_model_grad(self, x): + """ + Evaluate the gradient of the Lagrangian model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the gradient of the Lagrangian model is evaluated. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Gradient of the Lagrangian model at `x`. 
+ """ + return ( + self.models.fun_grad(x) + + self._lm_linear_ub @ self._pb.linear.a_ub + + self._lm_linear_eq @ self._pb.linear.a_eq + + self._lm_nonlinear_ub @ self.models.cub_grad(x) + + self._lm_nonlinear_eq @ self.models.ceq_grad(x) + ) + + def lag_model_hess(self): + """ + Evaluate the Hessian matrix of the Lagrangian model at a given point. + + Returns + ------- + `numpy.ndarray`, shape (n, n) + Hessian matrix of the Lagrangian model at `x`. + """ + hess = self.models.fun_hess() + if self.m_nonlinear_ub > 0: + hess += self._lm_nonlinear_ub @ self.models.cub_hess() + if self.m_nonlinear_eq > 0: + hess += self._lm_nonlinear_eq @ self.models.ceq_hess() + return hess + + def lag_model_hess_prod(self, v): + """ + Evaluate the right product of the Hessian matrix of the Lagrangian + model with a given vector. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Vector with which the Hessian matrix of the Lagrangian model is + multiplied from the right. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Right product of the Hessian matrix of the Lagrangian model with + `v`. + """ + return ( + self.models.fun_hess_prod(v) + + self._lm_nonlinear_ub @ self.models.cub_hess_prod(v) + + self._lm_nonlinear_eq @ self.models.ceq_hess_prod(v) + ) + + def lag_model_curv(self, v): + """ + Evaluate the curvature of the Lagrangian model along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the Lagrangian model is + evaluated. + + Returns + ------- + float + Curvature of the Lagrangian model along `v`. + """ + return ( + self.models.fun_curv(v) + + self._lm_nonlinear_ub @ self.models.cub_curv(v) + + self._lm_nonlinear_eq @ self.models.ceq_curv(v) + ) + + def sqp_fun(self, step): + """ + Evaluate the objective function of the SQP subproblem. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Step along which the objective function of the SQP subproblem is + evaluated. + + Returns + ------- + float + Value of the objective function of the SQP subproblem along `step`. + """ + return step @ ( + self.models.fun_grad(self.x_best) + + 0.5 * self.lag_model_hess_prod(step) + ) + + def sqp_cub(self, step): + """ + Evaluate the linearization of the nonlinear inequality constraints. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Step along which the linearization of the nonlinear inequality + constraints is evaluated. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Value of the linearization of the nonlinear inequality constraints + along `step`. + """ + return ( + self.models.cub(self.x_best) + + self.models.cub_grad(self.x_best) @ step + ) + + def sqp_ceq(self, step): + """ + Evaluate the linearization of the nonlinear equality constraints. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Step along which the linearization of the nonlinear equality + constraints is evaluated. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Value of the linearization of the nonlinear equality constraints + along `step`. + """ + return ( + self.models.ceq(self.x_best) + + self.models.ceq_grad(self.x_best) @ step + ) + + def merit(self, x, fun_val=None, cub_val=None, ceq_val=None): + """ + Evaluate the merit function at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the merit function is evaluated. + fun_val : float, optional + Value of the objective function at `x`. 
If not provided, the + objective function is evaluated at `x`. + cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,), optional + Values of the nonlinear inequality constraints. If not provided, + the nonlinear inequality constraints are evaluated at `x`. + ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Values of the nonlinear equality constraints. If not provided, + the nonlinear equality constraints are evaluated at `x`. + + Returns + ------- + float + Value of the merit function at `x`. + """ + if fun_val is None or cub_val is None or ceq_val is None: + fun_val, cub_val, ceq_val = self._pb(x) + m_val = fun_val + if self._penalty > 0.0: + c_val = self._pb.violation(x, cub_val=cub_val, ceq_val=ceq_val) + if np.count_nonzero(c_val): + m_val += self._penalty * np.linalg.norm(c_val) + return m_val + + def get_constraint_linearizations(self, x): + """ + Get the linearizations of the constraints at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the linearizations of the constraints are evaluated. + + Returns + ------- + `numpy.ndarray`, shape (m_linear_ub + m_nonlinear_ub, n) + Left-hand side matrix of the linearized inequality constraints. + `numpy.ndarray`, shape (m_linear_ub + m_nonlinear_ub,) + Right-hand side vector of the linearized inequality constraints. + `numpy.ndarray`, shape (m_linear_eq + m_nonlinear_eq, n) + Left-hand side matrix of the linearized equality constraints. + `numpy.ndarray`, shape (m_linear_eq + m_nonlinear_eq,) + Right-hand side vector of the linearized equality constraints. + """ + aub = np.block( + [ + [self._pb.linear.a_ub], + [self.models.cub_grad(x)], + ] + ) + bub = np.block( + [ + self._pb.linear.b_ub - self._pb.linear.a_ub @ x, + -self.models.cub(x), + ] + ) + aeq = np.block( + [ + [self._pb.linear.a_eq], + [self.models.ceq_grad(x)], + ] + ) + beq = np.block( + [ + self._pb.linear.b_eq - self._pb.linear.a_eq @ x, + -self.models.ceq(x), + ] + ) + return aub, bub, aeq, beq + + def get_trust_region_step(self, options): + """ + Get the trust-region step. + + The trust-region step is computed by solving the derivative-free + trust-region SQP subproblem using a Byrd-Omojokun composite-step + approach. For more details, see Section 5.2.3 of [1]_. + + Parameters + ---------- + options : dict + Options of the solver. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Normal step. + `numpy.ndarray`, shape (n,) + Tangential step. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization + Methods and Software*. PhD thesis, Department of Applied + Mathematics, The Hong Kong Polytechnic University, Hong Kong, China, + 2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + # Evaluate the linearizations of the constraints. + aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best) + xl = self._pb.bounds.xl - self.x_best + xu = self._pb.bounds.xu - self.x_best + + # Evaluate the normal step. 
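+        # (In the Byrd-Omojokun composite-step approach, the normal step
+        # reduces the violation of the linearized constraints within an
+        # enlarged trust region, and the tangential step then reduces the
+        # model of the objective while preserving that linearized
+        # feasibility.)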
+ radius = self._constants[Constants.BYRD_OMOJOKUN_FACTOR] * self.radius + normal_step = normal_byrd_omojokun( + aub, + bub, + aeq, + beq, + xl, + xu, + radius, + options[Options.DEBUG], + **self._constants, + ) + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if (np.any(normal_step + tol < xl) + or np.any(xu < normal_step - tol)): + warnings.warn( + "the normal step does not respect the bound constraint.", + RuntimeWarning, + 2, + ) + if np.linalg.norm(normal_step) > 1.1 * radius: + warnings.warn( + "the normal step does not respect the trust-region " + "constraint.", + RuntimeWarning, + 2, + ) + + # Evaluate the tangential step. + radius = np.sqrt(self.radius**2.0 - normal_step @ normal_step) + xl -= normal_step + xu -= normal_step + bub = np.maximum(bub - aub @ normal_step, 0.0) + g_best = self.models.fun_grad(self.x_best) + self.lag_model_hess_prod( + normal_step + ) + if self._pb.type in ["unconstrained", "bound-constrained"]: + tangential_step = tangential_byrd_omojokun( + g_best, + self.lag_model_hess_prod, + xl, + xu, + radius, + options[Options.DEBUG], + **self._constants, + ) + else: + tangential_step = constrained_tangential_byrd_omojokun( + g_best, + self.lag_model_hess_prod, + xl, + xu, + aub, + bub, + aeq, + radius, + options["debug"], + **self._constants, + ) + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if np.any(tangential_step + tol < xl) or np.any( + xu < tangential_step - tol + ): + warnings.warn( + "The tangential step does not respect the bound " + "constraints.", + RuntimeWarning, + 2, + ) + if ( + np.linalg.norm(normal_step + tangential_step) + > 1.1 * np.sqrt(2.0) * self.radius + ): + warnings.warn( + "The trial step does not respect the trust-region " + "constraint.", + RuntimeWarning, + 2, + ) + return normal_step, tangential_step + + def get_geometry_step(self, k_new, options): + """ + Get the geometry-improving step. + + Three different geometry-improving steps are computed and the best one + is returned. For more details, see Section 5.2.7 of [1]_. + + Parameters + ---------- + k_new : int + Index of the interpolation point to be modified. + options : dict + Options of the solver. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Geometry-improving step. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the computation of a determinant fails. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization + Methods and Software*. PhD thesis, Department of Applied + Mathematics, The Hong Kong Polytechnic University, Hong Kong, China, + 2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if options[Options.DEBUG]: + assert ( + k_new != self.best_index + ), "The index `k_new` must be different from the best index." + + # Build the k_new-th Lagrange polynomial. + coord_vec = np.squeeze(np.eye(1, self.models.npt, k_new)) + lag = Quadratic( + self.models.interpolation, + coord_vec, + options[Options.DEBUG], + ) + g_lag = lag.grad(self.x_best, self.models.interpolation) + + # Compute a simple constrained Cauchy step. 
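+        # (Roughly, each candidate geometry step tries to make the k_new-th
+        # Lagrange polynomial large in absolute value, which improves the
+        # poisedness of the interpolation set; the candidates are compared
+        # below via the determinant sigma of the updated interpolation
+        # system.)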
+ xl = self._pb.bounds.xl - self.x_best + xu = self._pb.bounds.xu - self.x_best + step = cauchy_geometry( + 0.0, + g_lag, + lambda v: lag.curv(v, self.models.interpolation), + xl, + xu, + self.radius, + options[Options.DEBUG], + ) + sigma = self.models.determinants(self.x_best + step, k_new) + + # Compute the solution on the straight lines joining the interpolation + # points to the k-th one, and choose it if it provides a larger value + # of the determinant of the interpolation system in absolute value. + xpt = ( + self.models.interpolation.xpt + - self.models.interpolation.xpt[:, self.best_index, np.newaxis] + ) + xpt[:, [0, self.best_index]] = xpt[:, [self.best_index, 0]] + step_alt = spider_geometry( + 0.0, + g_lag, + lambda v: lag.curv(v, self.models.interpolation), + xpt[:, 1:], + xl, + xu, + self.radius, + options[Options.DEBUG], + ) + sigma_alt = self.models.determinants(self.x_best + step_alt, k_new) + if abs(sigma_alt) > abs(sigma): + step = step_alt + sigma = sigma_alt + + # Compute a Cauchy step on the tangent space of the active constraints. + if self._pb.type in [ + "linearly constrained", + "nonlinearly constrained", + ]: + aub, bub, aeq, beq = ( + self.get_constraint_linearizations(self.x_best)) + tol_bd = get_arrays_tol(xl, xu) + tol_ub = get_arrays_tol(bub) + free_xl = xl <= -tol_bd + free_xu = xu >= tol_bd + free_ub = bub >= tol_ub + + # Compute the Cauchy step. + n_act, q = qr_tangential_byrd_omojokun( + aub, + aeq, + free_xl, + free_xu, + free_ub, + ) + g_lag_proj = q[:, n_act:] @ (q[:, n_act:].T @ g_lag) + norm_g_lag_proj = np.linalg.norm(g_lag_proj) + if 0 < n_act < self._pb.n and norm_g_lag_proj > TINY * self.radius: + step_alt = (self.radius / norm_g_lag_proj) * g_lag_proj + if lag.curv(step_alt, self.models.interpolation) < 0.0: + step_alt = -step_alt + + # Evaluate the constraint violation at the Cauchy step. + cbd = np.block([xl - step_alt, step_alt - xu]) + cub = aub @ step_alt - bub + ceq = aeq @ step_alt - beq + maxcv_val = max( + np.max(array, initial=0.0) + for array in [cbd, cub, np.abs(ceq)] + ) + + # Accept the new step if it is nearly feasible and do not + # drastically worsen the determinant of the interpolation + # system in absolute value. + tol = np.max(np.abs(step_alt[~free_xl]), initial=0.0) + tol = np.max(np.abs(step_alt[~free_xu]), initial=tol) + tol = np.max(np.abs(aub[~free_ub, :] @ step_alt), initial=tol) + tol = min(10.0 * tol, 1e-2 * np.linalg.norm(step_alt)) + if maxcv_val <= tol: + sigma_alt = self.models.determinants( + self.x_best + step_alt, k_new + ) + if abs(sigma_alt) >= 0.1 * abs(sigma): + step = np.clip(step_alt, xl, xu) + + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if np.any(step + tol < xl) or np.any(xu < step - tol): + warnings.warn( + "The geometry step does not respect the bound " + "constraints.", + RuntimeWarning, + 2, + ) + if np.linalg.norm(step) > 1.1 * self.radius: + warnings.warn( + "The geometry step does not respect the " + "trust-region constraint.", + RuntimeWarning, + 2, + ) + return step + + def get_second_order_correction_step(self, step, options): + """ + Get the second-order correction step. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + options : dict + Options of the solver. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Second-order correction step. + """ + # Evaluate the linearizations of the constraints. 
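+        # (The second-order correction re-solves the normal subproblem with
+        # the trial step's length as the radius; this is the usual remedy
+        # for the Maratos effect, where constraint curvature missed by the
+        # linear models would otherwise cause a good step to be rejected.)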
+ aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best) + xl = self._pb.bounds.xl - self.x_best + xu = self._pb.bounds.xu - self.x_best + radius = np.linalg.norm(step) + soc_step = normal_byrd_omojokun( + aub, + bub, + aeq, + beq, + xl, + xu, + radius, + options[Options.DEBUG], + **self._constants, + ) + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if np.any(soc_step + tol < xl) or np.any(xu < soc_step - tol): + warnings.warn( + "The second-order correction step does not " + "respect the bound constraints.", + RuntimeWarning, + 2, + ) + if np.linalg.norm(soc_step) > 1.1 * radius: + warnings.warn( + "The second-order correction step does not " + "respect the trust-region constraint.", + RuntimeWarning, + 2, + ) + return soc_step + + def get_reduction_ratio(self, step, fun_val, cub_val, ceq_val): + """ + Get the reduction ratio. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + fun_val : float + Objective function value at the trial point. + cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,) + Nonlinear inequality constraint values at the trial point. + ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,) + Nonlinear equality constraint values at the trial point. + + Returns + ------- + float + Reduction ratio. + """ + merit_old = self.merit( + self.x_best, + self.fun_best, + self.cub_best, + self.ceq_best, + ) + merit_new = self.merit(self.x_best + step, fun_val, cub_val, ceq_val) + merit_model_old = self.merit( + self.x_best, + 0.0, + self.models.cub(self.x_best), + self.models.ceq(self.x_best), + ) + merit_model_new = self.merit( + self.x_best + step, + self.sqp_fun(step), + self.sqp_cub(step), + self.sqp_ceq(step), + ) + if abs(merit_model_old - merit_model_new) > TINY * abs( + merit_old - merit_new + ): + return (merit_old - merit_new) / abs( + merit_model_old - merit_model_new + ) + else: + return -1.0 + + def increase_penalty(self, step): + """ + Increase the penalty parameter. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + """ + aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best) + viol_diff = max( + np.linalg.norm( + np.block( + [ + np.maximum(0.0, -bub), + beq, + ] + ) + ) + - np.linalg.norm( + np.block( + [ + np.maximum(0.0, aub @ step - bub), + aeq @ step - beq, + ] + ) + ), + 0.0, + ) + sqp_val = self.sqp_fun(step) + + threshold = np.linalg.norm( + np.block( + [ + self._lm_linear_ub, + self._lm_linear_eq, + self._lm_nonlinear_ub, + self._lm_nonlinear_eq, + ] + ) + ) + if abs(viol_diff) > TINY * abs(sqp_val): + threshold = max(threshold, sqp_val / viol_diff) + best_index_save = self.best_index + if ( + self._penalty + <= self._constants[Constants.PENALTY_INCREASE_THRESHOLD] + * threshold + ): + self._penalty = max( + self._constants[Constants.PENALTY_INCREASE_FACTOR] * threshold, + 1.0, + ) + self.set_best_index() + return best_index_save == self.best_index + + def decrease_penalty(self): + """ + Decrease the penalty parameter. + """ + self._penalty = min(self._penalty, self._get_low_penalty()) + self.set_best_index() + + def set_best_index(self): + """ + Set the index of the best point. 
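+
+        The best point is the interpolation point with the least merit
+        function value, ties being broken in favor of points with smaller
+        constraint violation.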
+ """ + best_index = self.best_index + m_best = self.merit( + self.x_best, + self.models.fun_val[best_index], + self.models.cub_val[best_index, :], + self.models.ceq_val[best_index, :], + ) + r_best = self._pb.maxcv( + self.x_best, + self.models.cub_val[best_index, :], + self.models.ceq_val[best_index, :], + ) + tol = ( + 10.0 + * EPS + * max(self.models.n, self.models.npt) + * max(abs(m_best), 1.0) + ) + for k in range(self.models.npt): + if k != self.best_index: + x_val = self.models.interpolation.point(k) + m_val = self.merit( + x_val, + self.models.fun_val[k], + self.models.cub_val[k, :], + self.models.ceq_val[k, :], + ) + r_val = self._pb.maxcv( + x_val, + self.models.cub_val[k, :], + self.models.ceq_val[k, :], + ) + if m_val < m_best or (m_val < m_best + tol and r_val < r_best): + best_index = k + m_best = m_val + r_best = r_val + self._best_index = best_index + + def get_index_to_remove(self, x_new=None): + """ + Get the index of the interpolation point to remove. + + If `x_new` is not provided, the index returned should be used during + the geometry-improvement phase. Otherwise, the index returned is the + best index for included `x_new` in the interpolation set. + + Parameters + ---------- + x_new : `numpy.ndarray`, shape (n,), optional + New point to be included in the interpolation set. + + Returns + ------- + int + Index of the interpolation point to remove. + float + Distance between `x_best` and the removed point. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the computation of a determinant fails. + """ + dist_sq = np.sum( + ( + self.models.interpolation.xpt + - self.models.interpolation.xpt[:, self.best_index, np.newaxis] + ) + ** 2.0, + axis=0, + ) + if x_new is None: + sigma = 1.0 + weights = dist_sq + else: + sigma = self.models.determinants(x_new) + weights = ( + np.maximum( + 1.0, + dist_sq + / max( + self._constants[Constants.LOW_RADIUS_FACTOR] + * self.radius, + self.resolution, + ) + ** 2.0, + ) + ** 3.0 + ) + weights[self.best_index] = -1.0 # do not remove the best point + k_max = np.argmax(weights * np.abs(sigma)) + return k_max, np.sqrt(dist_sq[k_max]) + + def update_radius(self, step, ratio): + """ + Update the trust-region radius. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + ratio : float + Reduction ratio. + """ + s_norm = np.linalg.norm(step) + if ratio <= self._constants[Constants.LOW_RATIO]: + self.radius *= self._constants[Constants.DECREASE_RADIUS_FACTOR] + elif ratio <= self._constants[Constants.HIGH_RATIO]: + self.radius = max( + self._constants[Constants.DECREASE_RADIUS_FACTOR] + * self.radius, + s_norm, + ) + else: + self.radius = min( + self._constants[Constants.INCREASE_RADIUS_FACTOR] + * self.radius, + max( + self._constants[Constants.DECREASE_RADIUS_FACTOR] + * self.radius, + self._constants[Constants.INCREASE_RADIUS_THRESHOLD] + * s_norm, + ), + ) + + def enhance_resolution(self, options): + """ + Enhance the resolution of the trust-region framework. + + Parameters + ---------- + options : dict + Options of the solver. + """ + if ( + self._constants[Constants.LARGE_RESOLUTION_THRESHOLD] + * options[Options.RHOEND] + < self.resolution + ): + self.resolution *= self._constants[ + Constants.DECREASE_RESOLUTION_FACTOR + ] + elif ( + self._constants[Constants.MODERATE_RESOLUTION_THRESHOLD] + * options[Options.RHOEND] + < self.resolution + ): + self.resolution = np.sqrt(self.resolution + * options[Options.RHOEND]) + else: + self.resolution = options[Options.RHOEND] + + # Reduce the trust-region radius. 
+        self._radius = max(
+            self._constants[Constants.DECREASE_RADIUS_FACTOR] * self._radius,
+            self.resolution,
+        )
+
+    def shift_x_base(self, options):
+        """
+        Shift the base point to `x_best`.
+
+        Parameters
+        ----------
+        options : dict
+            Options of the solver.
+        """
+        self.models.shift_x_base(np.copy(self.x_best), options)
+
+    def set_multipliers(self, x):
+        """
+        Set the Lagrange multipliers.
+
+        This method computes and sets the Lagrange multipliers of the linear
+        and nonlinear constraints to the QP multipliers.
+
+        Parameters
+        ----------
+        x : `numpy.ndarray`, shape (n,)
+            Point at which the Lagrange multipliers are computed.
+        """
+        # Build the constraints of the least-squares problem.
+        incl_linear_ub = self._pb.linear.a_ub @ x >= self._pb.linear.b_ub
+        incl_nonlinear_ub = self.cub_best >= 0.0
+        incl_xl = self._pb.bounds.xl >= x
+        incl_xu = self._pb.bounds.xu <= x
+        m_linear_ub = np.count_nonzero(incl_linear_ub)
+        m_nonlinear_ub = np.count_nonzero(incl_nonlinear_ub)
+        m_xl = np.count_nonzero(incl_xl)
+        m_xu = np.count_nonzero(incl_xu)
+
+        if (
+            m_linear_ub + m_nonlinear_ub + self.m_linear_eq
+            + self.m_nonlinear_eq > 0
+        ):
+            identity = np.eye(self._pb.n)
+            c_jac = np.r_[
+                -identity[incl_xl, :],
+                identity[incl_xu, :],
+                self._pb.linear.a_ub[incl_linear_ub, :],
+                self.models.cub_grad(x, incl_nonlinear_ub),
+                self._pb.linear.a_eq,
+                self.models.ceq_grad(x),
+            ]
+
+            # Solve the least-squares problem.
+            g_best = self.models.fun_grad(x)
+            xl_lm = np.full(c_jac.shape[0], -np.inf)
+            xl_lm[: m_xl + m_xu + m_linear_ub + m_nonlinear_ub] = 0.0
+            res = lsq_linear(
+                c_jac.T,
+                -g_best,
+                bounds=(xl_lm, np.inf),
+                method="bvls",
+            )
+
+            # Extract the Lagrange multipliers.
+            self._lm_linear_ub[incl_linear_ub] = res.x[
+                m_xl + m_xu:m_xl + m_xu + m_linear_ub
+            ]
+            self._lm_linear_ub[~incl_linear_ub] = 0.0
+            self._lm_nonlinear_ub[incl_nonlinear_ub] = res.x[
+                m_xl
+                + m_xu
+                + m_linear_ub:m_xl
+                + m_xu
+                + m_linear_ub
+                + m_nonlinear_ub
+            ]
+            self._lm_nonlinear_ub[~incl_nonlinear_ub] = 0.0
+            self._lm_linear_eq[:] = res.x[
+                m_xl
+                + m_xu
+                + m_linear_ub
+                + m_nonlinear_ub:m_xl
+                + m_xu
+                + m_linear_ub
+                + m_nonlinear_ub
+                + self.m_linear_eq
+            ]
+            self._lm_nonlinear_eq[:] = res.x[
+                m_xl + m_xu + m_linear_ub + m_nonlinear_ub + self.m_linear_eq:
+            ]
+
+    def _get_low_penalty(self):
+        r_val_ub = np.c_[
+            (
+                self.models.interpolation.x_base[np.newaxis, :]
+                + self.models.interpolation.xpt.T
+            )
+            @ self._pb.linear.a_ub.T
+            - self._pb.linear.b_ub[np.newaxis, :],
+            self.models.cub_val,
+        ]
+        r_val_eq = (
+            self.models.interpolation.x_base[np.newaxis, :]
+            + self.models.interpolation.xpt.T
+        ) @ self._pb.linear.a_eq.T - self._pb.linear.b_eq[np.newaxis, :]
+        r_val_eq = np.block(
+            [
+                r_val_eq,
+                -r_val_eq,
+                self.models.ceq_val,
+                -self.models.ceq_val,
+            ]
+        )
+        r_val = np.block([r_val_ub, r_val_eq])
+        c_min = np.nanmin(r_val, axis=0)
+        c_max = np.nanmax(r_val, axis=0)
+        indices = (
+            c_min
+            < self._constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] * c_max
+        )
+        if np.any(indices):
+            f_min = np.nanmin(self.models.fun_val)
+            f_max = np.nanmax(self.models.fun_val)
+            c_min_neg = np.minimum(0.0, c_min[indices])
+            c_diff = np.min(c_max[indices] - c_min_neg)
+            if c_diff > TINY * (f_max - f_min):
+                penalty = (f_max - f_min) / c_diff
+            else:
+                penalty = np.inf
+        else:
+            penalty = 0.0
+        return penalty
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa34bbbf9a4695cd0de131829f6bef7e76248bd0
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py
@@ -0,0 +1,1488 @@
+import warnings
+
+import numpy as np
+from scipy.optimize import (
+    Bounds,
+    LinearConstraint,
+    NonlinearConstraint,
+    OptimizeResult,
+)
+
+from .framework import TrustRegion
+from .problem import (
+    ObjectiveFunction,
+    BoundConstraints,
+    LinearConstraints,
+    NonlinearConstraints,
+    Problem,
+)
+from .utils import (
+    MaxEvalError,
+    TargetSuccess,
+    CallbackSuccess,
+    FeasibleSuccess,
+    exact_1d_array,
+)
+from .settings import (
+    ExitStatus,
+    Options,
+    Constants,
+    DEFAULT_OPTIONS,
+    DEFAULT_CONSTANTS,
+    PRINT_OPTIONS,
+)
+
+
+def minimize(
+    fun,
+    x0,
+    args=(),
+    bounds=None,
+    constraints=(),
+    callback=None,
+    options=None,
+    **kwargs,
+):
+    r"""
+    Minimize a scalar function using the COBYQA method.
+
+    The Constrained Optimization BY Quadratic Approximations (COBYQA) method
+    is a derivative-free method designed to solve general nonlinear
+    optimization problems. A complete description of COBYQA is given in [3]_.
+
+    Parameters
+    ----------
+    fun : {callable, None}
+        Objective function to be minimized.
+
+            ``fun(x, *args) -> float``
+
+        where ``x`` is an array with shape (n,) and `args` is a tuple. If
+        `fun` is ``None``, the objective function is assumed to be the zero
+        function, resulting in a feasibility problem.
+    x0 : array_like, shape (n,)
+        Initial guess.
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    bounds : {`scipy.optimize.Bounds`, array_like, shape (n, 2)}, optional
+        Bound constraints of the problem. It can be one of the cases below.
+
+        #. An instance of `scipy.optimize.Bounds`. For the time being, the
+           argument ``keep_feasible`` is disregarded, and all the constraints
+           are considered unrelaxable and will be enforced.
+        #. An array with shape (n, 2). The bound constraints for ``x[i]`` are
+           ``bounds[i][0] <= x[i] <= bounds[i][1]``. Set ``bounds[i][0]`` to
+           :math:`-\infty` if there is no lower bound, and set
+           ``bounds[i][1]`` to :math:`\infty` if there is no upper bound.
+
+        The COBYQA method always respects the bound constraints.
+    constraints : {Constraint, list}, optional
+        General constraints of the problem. It can be one of the cases below.
+
+        #. An instance of `scipy.optimize.LinearConstraint`. The argument
+           ``keep_feasible`` is disregarded.
+        #. An instance of `scipy.optimize.NonlinearConstraint`. The arguments
+           ``jac``, ``hess``, ``keep_feasible``, ``finite_diff_rel_step``,
+           and ``finite_diff_jac_sparsity`` are disregarded.
+
+        #. A list, each of whose elements is described in the cases above.
+
+    callback : callable, optional
+        A callback executed at each objective function evaluation. The method
+        terminates if a ``StopIteration`` exception is raised by the callback
+        function. Its signature can be one of the following:
+
+            ``callback(intermediate_result)``
+
+        where ``intermediate_result`` is a keyword parameter that contains an
+        instance of `scipy.optimize.OptimizeResult`, with attributes ``x``
+        and ``fun``, being the point at which the objective function is
+        evaluated and the value of the objective function, respectively. The
+        name of the parameter must be ``intermediate_result`` for the
+        callback to be passed an instance of `scipy.optimize.OptimizeResult`.
+
+        Alternatively, the callback function can have the signature:
+
+            ``callback(xk)``
+
+        where ``xk`` is the point at which the objective function is
+        evaluated. Introspection is used to determine which of the
+        signatures to invoke.
+    options : dict, optional
+        Options passed to the solver. Accepted keys are:
+
+        disp : bool, optional
+            Whether to print information about the optimization procedure.
+        maxfev : int, optional
+            Maximum number of function evaluations.
+        maxiter : int, optional
+            Maximum number of iterations.
+        target : float, optional
+            Target on the objective function value. The optimization
+            procedure is terminated when the objective function value of a
+            feasible point is less than or equal to this target.
+        feasibility_tol : float, optional
+            Tolerance on the constraint violation. If the maximum
+            constraint violation at a point is less than or equal to this
+            tolerance, the point is considered feasible.
+        radius_init : float, optional
+            Initial trust-region radius. Typically, this value should be on
+            the order of one tenth of the greatest expected change to `x0`.
+        radius_final : float, optional
+            Final trust-region radius. It should indicate the accuracy
+            required in the final values of the variables.
+        nb_points : int, optional
+            Number of interpolation points used to build the quadratic
+            models of the objective and constraint functions.
+        scale : bool, optional
+            Whether to scale the variables according to the bounds.
+        filter_size : int, optional
+            Maximum number of points in the filter. The filter is used to
+            select the best point returned by the optimization procedure.
+        store_history : bool, optional
+            Whether to store the history of the function evaluations.
+        history_size : int, optional
+            Maximum number of function evaluations to store in the history.
+        debug : bool, optional
+            Whether to perform additional checks during the optimization
+            procedure. This option should only be used for debugging
+            purposes; general users are discouraged from using it.
+
+        Other constants (from the keyword arguments) are described below.
+        They are not intended to be changed by general users. They should
+        only be changed by users with a deep understanding of the algorithm,
+        who want to experiment with different settings.
+
+    Returns
+    -------
+    `scipy.optimize.OptimizeResult`
+        Result of the optimization procedure, with the following fields:
+
+        message : str
+            Description of the cause of the termination.
+        success : bool
+            Whether the optimization procedure terminated successfully.
+        status : int
+            Termination status of the optimization procedure.
+        x : `numpy.ndarray`, shape (n,)
+            Solution point.
+        fun : float
+            Objective function value at the solution point.
+        maxcv : float
+            Maximum constraint violation at the solution point.
+        nfev : int
+            Number of function evaluations.
+        nit : int
+            Number of iterations.
+
+        If ``store_history`` is True, the result also has the following
+        fields:
+
+        fun_history : `numpy.ndarray`, shape (nfev,)
+            History of the objective function values.
+        maxcv_history : `numpy.ndarray`, shape (nfev,)
+            History of the maximum constraint violations.
+
+        A description of the termination statuses is given below.
+
+        .. list-table::
+            :widths: 25 75
+            :header-rows: 1
+
+            * - Exit status
+              - Description
+            * - 0
+              - The lower bound for the trust-region radius has been reached.
+ * - 1 + - The target objective function value has been reached. + * - 2 + - All variables are fixed by the bound constraints. + * - 3 + - The callback requested to stop the optimization procedure. + * - 4 + - The feasibility problem received has been solved successfully. + * - 5 + - The maximum number of function evaluations has been exceeded. + * - 6 + - The maximum number of iterations has been exceeded. + * - -1 + - The bound constraints are infeasible. + * - -2 + - A linear algebra error occurred. + + Other Parameters + ---------------- + decrease_radius_factor : float, optional + Factor by which the trust-region radius is reduced when the reduction + ratio is low or negative. + increase_radius_factor : float, optional + Factor by which the trust-region radius is increased when the reduction + ratio is large. + increase_radius_threshold : float, optional + Threshold that controls the increase of the trust-region radius when + the reduction ratio is large. + decrease_radius_threshold : float, optional + Threshold used to determine whether the trust-region radius should be + reduced to the resolution. + decrease_resolution_factor : float, optional + Factor by which the resolution is reduced when the current value is far + from its final value. + large_resolution_threshold : float, optional + Threshold used to determine whether the resolution is far from its + final value. + moderate_resolution_threshold : float, optional + Threshold used to determine whether the resolution is close to its + final value. + low_ratio : float, optional + Threshold used to determine whether the reduction ratio is low. + high_ratio : float, optional + Threshold used to determine whether the reduction ratio is high. + very_low_ratio : float, optional + Threshold used to determine whether the reduction ratio is very low. + This is used to determine whether the models should be reset. + penalty_increase_threshold : float, optional + Threshold used to determine whether the penalty parameter should be + increased. + penalty_increase_factor : float, optional + Factor by which the penalty parameter is increased. + short_step_threshold : float, optional + Factor used to determine whether the trial step is too short. + low_radius_factor : float, optional + Factor used to determine which interpolation point should be removed + from the interpolation set at each iteration. + byrd_omojokun_factor : float, optional + Factor by which the trust-region radius is reduced for the computations + of the normal step in the Byrd-Omojokun composite-step approach. + threshold_ratio_constraints : float, optional + Threshold used to determine which constraints should be taken into + account when decreasing the penalty parameter. + large_shift_factor : float, optional + Factor used to determine whether the point around which the quadratic + models are built should be updated. + large_gradient_factor : float, optional + Factor used to determine whether the models should be reset. + resolution_factor : float, optional + Factor by which the resolution is decreased. + improve_tcg : bool, optional + Whether to improve the steps computed by the truncated conjugate + gradient method when the trust-region boundary is reached. + + References + ---------- + .. [1] J. Nocedal and S. J. Wright. *Numerical Optimization*. Springer Ser. + Oper. Res. Financ. Eng. Springer, New York, NY, USA, second edition, + 2006. `doi:10.1007/978-0-387-40065-5 + `_. + .. [2] M. J. D. Powell. 
A direct search optimization method that models the + objective and constraint functions by linear interpolation. In S. Gomez + and J.-P. Hennart, editors, *Advances in Optimization and Numerical + Analysis*, volume 275 of Math. Appl., pages 51--67. Springer, Dordrecht, + Netherlands, 1994. `doi:10.1007/978-94-015-8330-5_4 + `_. + .. [3] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + + Examples + -------- + To demonstrate how to use `minimize`, we first minimize the Rosenbrock + function implemented in `scipy.optimize` in an unconstrained setting. + + .. testsetup:: + + import numpy as np + np.set_printoptions(precision=3, suppress=True) + + >>> from cobyqa import minimize + >>> from scipy.optimize import rosen + + To solve the problem using COBYQA, run: + + >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + >>> res = minimize(rosen, x0) + >>> res.x + array([1., 1., 1., 1., 1.]) + + To see how bound and constraints are handled using `minimize`, we solve + Example 16.4 of [1]_, defined as + + .. math:: + + \begin{aligned} + \min_{x \in \mathbb{R}^2} & \quad (x_1 - 1)^2 + (x_2 - 2.5)^2\\ + \text{s.t.} & \quad -x_1 + 2x_2 \le 2,\\ + & \quad x_1 + 2x_2 \le 6,\\ + & \quad x_1 - 2x_2 \le 2,\\ + & \quad x_1 \ge 0,\\ + & \quad x_2 \ge 0. + \end{aligned} + + >>> import numpy as np + >>> from scipy.optimize import Bounds, LinearConstraint + + Its objective function can be implemented as: + + >>> def fun(x): + ... return (x[0] - 1.0)**2 + (x[1] - 2.5)**2 + + This problem can be solved using `minimize` as: + + >>> x0 = [2.0, 0.0] + >>> bounds = Bounds([0.0, 0.0], np.inf) + >>> constraints = LinearConstraint([ + ... [-1.0, 2.0], + ... [1.0, 2.0], + ... [1.0, -2.0], + ... ], -np.inf, [2.0, 6.0, 2.0]) + >>> res = minimize(fun, x0, bounds=bounds, constraints=constraints) + >>> res.x + array([1.4, 1.7]) + + To see how nonlinear constraints are handled, we solve Problem (F) of [2]_, + defined as + + .. math:: + + \begin{aligned} + \min_{x \in \mathbb{R}^2} & \quad -x_1 - x_2\\ + \text{s.t.} & \quad x_1^2 - x_2 \le 0,\\ + & \quad x_1^2 + x_2^2 \le 1. + \end{aligned} + + >>> from scipy.optimize import NonlinearConstraint + + Its objective and constraint functions can be implemented as: + + >>> def fun(x): + ... return -x[0] - x[1] + >>> + >>> def cub(x): + ... return [x[0]**2 - x[1], x[0]**2 + x[1]**2] + + This problem can be solved using `minimize` as: + + >>> x0 = [1.0, 1.0] + >>> constraints = NonlinearConstraint(cub, -np.inf, [0.0, 1.0]) + >>> res = minimize(fun, x0, constraints=constraints) + >>> res.x + array([0.707, 0.707]) + + Finally, to see how to supply linear and nonlinear constraints + simultaneously, we solve Problem (G) of [2]_, defined as + + .. math:: + + \begin{aligned} + \min_{x \in \mathbb{R}^3} & \quad x_3\\ + \text{s.t.} & \quad 5x_1 - x_2 + x_3 \ge 0,\\ + & \quad -5x_1 - x_2 + x_3 \ge 0,\\ + & \quad x_1^2 + x_2^2 + 4x_2 \le x_3. + \end{aligned} + + Its objective and nonlinear constraint functions can be implemented as: + + >>> def fun(x): + ... return x[2] + >>> + >>> def cub(x): + ... return x[0]**2 + x[1]**2 + 4.0*x[1] - x[2] + + This problem can be solved using `minimize` as: + + >>> x0 = [1.0, 1.0, 1.0] + >>> constraints = [ + ... LinearConstraint( + ... [[5.0, -1.0, 1.0], [-5.0, -1.0, 1.0]], + ... [0.0, 0.0], + ... np.inf, + ... ), + ... NonlinearConstraint(cub, -np.inf, 0.0), + ... 
] + >>> res = minimize(fun, x0, constraints=constraints) + >>> res.x + array([ 0., -3., -3.]) + """ + # Get basic options that are needed for the initialization. + if options is None: + options = {} + else: + options = dict(options) + verbose = options.get(Options.VERBOSE, DEFAULT_OPTIONS[Options.VERBOSE]) + verbose = bool(verbose) + feasibility_tol = options.get( + Options.FEASIBILITY_TOL, + DEFAULT_OPTIONS[Options.FEASIBILITY_TOL], + ) + feasibility_tol = float(feasibility_tol) + scale = options.get(Options.SCALE, DEFAULT_OPTIONS[Options.SCALE]) + scale = bool(scale) + store_history = options.get( + Options.STORE_HISTORY, + DEFAULT_OPTIONS[Options.STORE_HISTORY], + ) + store_history = bool(store_history) + if Options.HISTORY_SIZE in options and options[Options.HISTORY_SIZE] <= 0: + raise ValueError("The size of the history must be positive.") + history_size = options.get( + Options.HISTORY_SIZE, + DEFAULT_OPTIONS[Options.HISTORY_SIZE], + ) + history_size = int(history_size) + if Options.FILTER_SIZE in options and options[Options.FILTER_SIZE] <= 0: + raise ValueError("The size of the filter must be positive.") + filter_size = options.get( + Options.FILTER_SIZE, + DEFAULT_OPTIONS[Options.FILTER_SIZE], + ) + filter_size = int(filter_size) + debug = options.get(Options.DEBUG, DEFAULT_OPTIONS[Options.DEBUG]) + debug = bool(debug) + + # Initialize the objective function. + if not isinstance(args, tuple): + args = (args,) + obj = ObjectiveFunction(fun, verbose, debug, *args) + + # Initialize the bound constraints. + if not hasattr(x0, "__len__"): + x0 = [x0] + n_orig = len(x0) + bounds = BoundConstraints(_get_bounds(bounds, n_orig)) + + # Initialize the constraints. + linear_constraints, nonlinear_constraints = _get_constraints(constraints) + linear = LinearConstraints(linear_constraints, n_orig, debug) + nonlinear = NonlinearConstraints(nonlinear_constraints, verbose, debug) + + # Initialize the problem (and remove the fixed variables). + pb = Problem( + obj, + x0, + bounds, + linear, + nonlinear, + callback, + feasibility_tol, + scale, + store_history, + history_size, + filter_size, + debug, + ) + + # Set the default options. + _set_default_options(options, pb.n) + constants = _set_default_constants(**kwargs) + + # Initialize the models and skip the computations whenever possible. + if not pb.bounds.is_feasible: + # The bound constraints are infeasible. + return _build_result( + pb, + 0.0, + False, + ExitStatus.INFEASIBLE_ERROR, + 0, + options, + ) + elif pb.n == 0: + # All variables are fixed by the bound constraints. + return _build_result( + pb, + 0.0, + True, + ExitStatus.FIXED_SUCCESS, + 0, + options, + ) + if verbose: + print("Starting the optimization procedure.") + print(f"Initial trust-region radius: {options[Options.RHOBEG]}.") + print(f"Final trust-region radius: {options[Options.RHOEND]}.") + print( + f"Maximum number of function evaluations: " + f"{options[Options.MAX_EVAL]}." + ) + print(f"Maximum number of iterations: {options[Options.MAX_ITER]}.") + print() + try: + framework = TrustRegion(pb, options, constants) + except TargetSuccess: + # The target on the objective function value has been reached + return _build_result( + pb, + 0.0, + True, + ExitStatus.TARGET_SUCCESS, + 0, + options, + ) + except CallbackSuccess: + # The callback raised a StopIteration exception. + return _build_result( + pb, + 0.0, + True, + ExitStatus.CALLBACK_SUCCESS, + 0, + options, + ) + except FeasibleSuccess: + # The feasibility problem has been solved successfully. 
+        return _build_result(
+            pb,
+            0.0,
+            True,
+            ExitStatus.FEASIBLE_SUCCESS,
+            0,
+            options,
+        )
+    except MaxEvalError:
+        # The maximum number of function evaluations has been exceeded.
+        return _build_result(
+            pb,
+            0.0,
+            False,
+            ExitStatus.MAX_EVAL_WARNING,
+            0,
+            options,
+        )
+    except np.linalg.LinAlgError:
+        # The construction of the initial interpolation set failed.
+        return _build_result(
+            pb,
+            0.0,
+            False,
+            ExitStatus.LINALG_ERROR,
+            0,
+            options,
+        )
+
+    # Start the optimization procedure.
+    success = False
+    n_iter = 0
+    k_new = None
+    n_short_steps = 0
+    n_very_short_steps = 0
+    n_alt_models = 0
+    while True:
+        # Stop the optimization procedure if the maximum number of iterations
+        # has been exceeded. We do not write the main loop as a for loop
+        # because we want to access the number of iterations outside the
+        # loop.
+        if n_iter >= options[Options.MAX_ITER]:
+            status = ExitStatus.MAX_ITER_WARNING
+            break
+        n_iter += 1
+
+        # Update the point around which the quadratic models are built.
+        if (
+            np.linalg.norm(
+                framework.x_best - framework.models.interpolation.x_base
+            )
+            >= constants[Constants.LARGE_SHIFT_FACTOR] * framework.radius
+        ):
+            framework.shift_x_base(options)
+
+        # Evaluate the trial step.
+        radius_save = framework.radius
+        normal_step, tangential_step = framework.get_trust_region_step(
+            options
+        )
+        step = normal_step + tangential_step
+        s_norm = np.linalg.norm(step)
+
+        # If the trial step is too short, we do not attempt to evaluate the
+        # objective and constraint functions. Instead, we reduce the
+        # trust-region radius and check whether the resolution should be
+        # enhanced and whether the geometry of the interpolation set should
+        # be improved. Otherwise, we entertain a classical iteration. The
+        # criterion for performing an exceptional jump is taken from NEWUOA.
+        if (
+            s_norm
+            <= constants[Constants.SHORT_STEP_THRESHOLD] * framework.resolution
+        ):
+            framework.radius *= constants[Constants.DECREASE_RESOLUTION_FACTOR]
+            if radius_save > framework.resolution:
+                n_short_steps = 0
+                n_very_short_steps = 0
+            else:
+                n_short_steps += 1
+                n_very_short_steps += 1
+                if s_norm > 0.1 * framework.resolution:
+                    n_very_short_steps = 0
+            enhance_resolution = n_short_steps >= 5 or n_very_short_steps >= 3
+            if enhance_resolution:
+                n_short_steps = 0
+                n_very_short_steps = 0
+                improve_geometry = False
+            else:
+                try:
+                    k_new, dist_new = framework.get_index_to_remove()
+                except np.linalg.LinAlgError:
+                    status = ExitStatus.LINALG_ERROR
+                    break
+                improve_geometry = dist_new > max(
+                    framework.radius,
+                    constants[Constants.RESOLUTION_FACTOR]
+                    * framework.resolution,
+                )
+        else:
+            # Increase the penalty parameter if necessary.
+            same_best_point = framework.increase_penalty(step)
+            if same_best_point:
+                # Evaluate the objective and constraint functions.
+                try:
+                    fun_val, cub_val, ceq_val = _eval(
+                        pb,
+                        framework,
+                        step,
+                        options,
+                    )
+                except TargetSuccess:
+                    status = ExitStatus.TARGET_SUCCESS
+                    success = True
+                    break
+                except FeasibleSuccess:
+                    status = ExitStatus.FEASIBLE_SUCCESS
+                    success = True
+                    break
+                except CallbackSuccess:
+                    status = ExitStatus.CALLBACK_SUCCESS
+                    success = True
+                    break
+                except MaxEvalError:
+                    status = ExitStatus.MAX_EVAL_WARNING
+                    break
+
+                # Perform a second-order correction step if necessary.
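+                # A second-order correction step is attempted only for
+                # nonlinearly constrained problems, when the merit function
+                # increased along the trial step although the normal step
+                # was comparatively large, as in the Byrd-Omojokun
+                # composite-step approach.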
+ merit_old = framework.merit( + framework.x_best, + framework.fun_best, + framework.cub_best, + framework.ceq_best, + ) + merit_new = framework.merit( + framework.x_best + step, fun_val, cub_val, ceq_val + ) + if ( + pb.type == "nonlinearly constrained" + and merit_new > merit_old + and np.linalg.norm(normal_step) + > constants[Constants.BYRD_OMOJOKUN_FACTOR] ** 2.0 + * framework.radius + ): + soc_step = framework.get_second_order_correction_step( + step, options + ) + if np.linalg.norm(soc_step) > 0.0: + step += soc_step + + # Evaluate the objective and constraint functions. + try: + fun_val, cub_val, ceq_val = _eval( + pb, + framework, + step, + options, + ) + except TargetSuccess: + status = ExitStatus.TARGET_SUCCESS + success = True + break + except FeasibleSuccess: + status = ExitStatus.FEASIBLE_SUCCESS + success = True + break + except CallbackSuccess: + status = ExitStatus.CALLBACK_SUCCESS + success = True + break + except MaxEvalError: + status = ExitStatus.MAX_EVAL_WARNING + break + + # Calculate the reduction ratio. + ratio = framework.get_reduction_ratio( + step, + fun_val, + cub_val, + ceq_val, + ) + + # Choose an interpolation point to remove. + try: + k_new = framework.get_index_to_remove( + framework.x_best + step + )[0] + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + + # Update the interpolation set. + try: + ill_conditioned = framework.models.update_interpolation( + k_new, framework.x_best + step, fun_val, cub_val, + ceq_val + ) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + framework.set_best_index() + + # Update the trust-region radius. + framework.update_radius(step, ratio) + + # Attempt to replace the models by the alternative ones. + if framework.radius <= framework.resolution: + if ratio >= constants[Constants.VERY_LOW_RATIO]: + n_alt_models = 0 + else: + n_alt_models += 1 + grad = framework.models.fun_grad(framework.x_best) + try: + grad_alt = framework.models.fun_alt_grad( + framework.x_best + ) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + if np.linalg.norm(grad) < constants[ + Constants.LARGE_GRADIENT_FACTOR + ] * np.linalg.norm(grad_alt): + n_alt_models = 0 + if n_alt_models >= 3: + try: + framework.models.reset_models() + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + n_alt_models = 0 + + # Update the Lagrange multipliers. + framework.set_multipliers(framework.x_best + step) + + # Check whether the resolution should be enhanced. + try: + k_new, dist_new = framework.get_index_to_remove() + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + improve_geometry = ( + ill_conditioned + or ratio <= constants[Constants.LOW_RATIO] + and dist_new + > max( + framework.radius, + constants[Constants.RESOLUTION_FACTOR] + * framework.resolution, + ) + ) + enhance_resolution = ( + radius_save <= framework.resolution + and ratio <= constants[Constants.LOW_RATIO] + and not improve_geometry + ) + else: + # When increasing the penalty parameter, the best point so far + # may change. In this case, we restart the iteration. + enhance_resolution = False + improve_geometry = False + + # Reduce the resolution if necessary. 
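+        # The resolution decreases towards rhoend in up to three stages,
+        # depending on how far it is from rhoend; the procedure terminates
+        # successfully once the resolution reaches rhoend.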
+ if enhance_resolution: + if framework.resolution <= options[Options.RHOEND]: + success = True + status = ExitStatus.RADIUS_SUCCESS + break + framework.enhance_resolution(options) + framework.decrease_penalty() + + if verbose: + maxcv_val = pb.maxcv( + framework.x_best, framework.cub_best, framework.ceq_best + ) + _print_step( + f"New trust-region radius: {framework.resolution}", + pb, + pb.build_x(framework.x_best), + framework.fun_best, + maxcv_val, + pb.n_eval, + n_iter, + ) + print() + + # Improve the geometry of the interpolation set if necessary. + if improve_geometry: + try: + step = framework.get_geometry_step(k_new, options) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + + # Evaluate the objective and constraint functions. + try: + fun_val, cub_val, ceq_val = _eval(pb, framework, step, options) + except TargetSuccess: + status = ExitStatus.TARGET_SUCCESS + success = True + break + except FeasibleSuccess: + status = ExitStatus.FEASIBLE_SUCCESS + success = True + break + except CallbackSuccess: + status = ExitStatus.CALLBACK_SUCCESS + success = True + break + except MaxEvalError: + status = ExitStatus.MAX_EVAL_WARNING + break + + # Update the interpolation set. + try: + framework.models.update_interpolation( + k_new, + framework.x_best + step, + fun_val, + cub_val, + ceq_val, + ) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + framework.set_best_index() + + return _build_result( + pb, + framework.penalty, + success, + status, + n_iter, + options, + ) + + +def _get_bounds(bounds, n): + """ + Uniformize the bounds. + """ + if bounds is None: + return Bounds(np.full(n, -np.inf), np.full(n, np.inf)) + elif isinstance(bounds, Bounds): + if bounds.lb.shape != (n,) or bounds.ub.shape != (n,): + raise ValueError(f"The bounds must have {n} elements.") + return bounds + elif hasattr(bounds, "__len__"): + bounds = np.asarray(bounds) + if bounds.shape != (n, 2): + raise ValueError( + "The shape of the bounds is not compatible with " + "the number of variables." + ) + return Bounds(bounds[:, 0], bounds[:, 1]) + else: + raise TypeError( + "The bounds must be an instance of " + "scipy.optimize.Bounds or an array-like object." + ) + + +def _get_constraints(constraints): + """ + Extract the linear and nonlinear constraints. + """ + if isinstance(constraints, dict) or not hasattr(constraints, "__len__"): + constraints = (constraints,) + + # Extract the linear and nonlinear constraints. 
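+    # Each element may be a scipy.optimize.LinearConstraint, a
+    # scipy.optimize.NonlinearConstraint, or a dict with "type" ("eq" or
+    # "ineq") and "fun" keys in the style of scipy.optimize.minimize.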
+ linear_constraints = [] + nonlinear_constraints = [] + for constraint in constraints: + if isinstance(constraint, LinearConstraint): + lb = exact_1d_array( + constraint.lb, + "The lower bound of the linear constraints must be a vector.", + ) + ub = exact_1d_array( + constraint.ub, + "The upper bound of the linear constraints must be a vector.", + ) + linear_constraints.append( + LinearConstraint( + constraint.A, + *np.broadcast_arrays(lb, ub), + ) + ) + elif isinstance(constraint, NonlinearConstraint): + lb = exact_1d_array( + constraint.lb, + "The lower bound of the " + "nonlinear constraints must be a " + "vector.", + ) + ub = exact_1d_array( + constraint.ub, + "The upper bound of the " + "nonlinear constraints must be a " + "vector.", + ) + nonlinear_constraints.append( + NonlinearConstraint( + constraint.fun, + *np.broadcast_arrays(lb, ub), + ) + ) + elif isinstance(constraint, dict): + if "type" not in constraint or constraint["type"] not in ( + "eq", + "ineq", + ): + raise ValueError('The constraint type must be "eq" or "ineq".') + if "fun" not in constraint or not callable(constraint["fun"]): + raise ValueError("The constraint function must be callable.") + nonlinear_constraints.append( + { + "fun": constraint["fun"], + "type": constraint["type"], + "args": constraint.get("args", ()), + } + ) + else: + raise TypeError( + "The constraints must be instances of " + "scipy.optimize.LinearConstraint, " + "scipy.optimize.NonlinearConstraint, or dict." + ) + return linear_constraints, nonlinear_constraints + + +def _set_default_options(options, n): + """ + Set the default options. + """ + if Options.RHOBEG in options and options[Options.RHOBEG] <= 0.0: + raise ValueError("The initial trust-region radius must be positive.") + if Options.RHOEND in options and options[Options.RHOEND] < 0.0: + raise ValueError("The final trust-region radius must be nonnegative.") + if Options.RHOBEG in options and Options.RHOEND in options: + if options[Options.RHOBEG] < options[Options.RHOEND]: + raise ValueError( + "The initial trust-region radius must be greater " + "than or equal to the final trust-region radius." + ) + elif Options.RHOBEG in options: + options[Options.RHOEND.value] = np.min( + [ + DEFAULT_OPTIONS[Options.RHOEND], + options[Options.RHOBEG], + ] + ) + elif Options.RHOEND in options: + options[Options.RHOBEG.value] = np.max( + [ + DEFAULT_OPTIONS[Options.RHOBEG], + options[Options.RHOEND], + ] + ) + else: + options[Options.RHOBEG.value] = DEFAULT_OPTIONS[Options.RHOBEG] + options[Options.RHOEND.value] = DEFAULT_OPTIONS[Options.RHOEND] + options[Options.RHOBEG.value] = float(options[Options.RHOBEG]) + options[Options.RHOEND.value] = float(options[Options.RHOEND]) + if Options.NPT in options and options[Options.NPT] <= 0: + raise ValueError("The number of interpolation points must be " + "positive.") + if ( + Options.NPT in options + and options[Options.NPT] > ((n + 1) * (n + 2)) // 2 + ): + raise ValueError( + f"The number of interpolation points must be at most " + f"{((n + 1) * (n + 2)) // 2}." + ) + options.setdefault(Options.NPT.value, DEFAULT_OPTIONS[Options.NPT](n)) + options[Options.NPT.value] = int(options[Options.NPT]) + if Options.MAX_EVAL in options and options[Options.MAX_EVAL] <= 0: + raise ValueError( + "The maximum number of function evaluations must be positive." 
+    )
+    options.setdefault(
+        Options.MAX_EVAL.value,
+        np.max(
+            [
+                DEFAULT_OPTIONS[Options.MAX_EVAL](n),
+                options[Options.NPT] + 1,
+            ]
+        ),
+    )
+    options[Options.MAX_EVAL.value] = int(options[Options.MAX_EVAL])
+    if Options.MAX_ITER in options and options[Options.MAX_ITER] <= 0:
+        raise ValueError("The maximum number of iterations must be positive.")
+    options.setdefault(
+        Options.MAX_ITER.value,
+        DEFAULT_OPTIONS[Options.MAX_ITER](n),
+    )
+    options[Options.MAX_ITER.value] = int(options[Options.MAX_ITER])
+    options.setdefault(Options.TARGET.value, DEFAULT_OPTIONS[Options.TARGET])
+    options[Options.TARGET.value] = float(options[Options.TARGET])
+    options.setdefault(
+        Options.FEASIBILITY_TOL.value,
+        DEFAULT_OPTIONS[Options.FEASIBILITY_TOL],
+    )
+    options[Options.FEASIBILITY_TOL.value] = float(
+        options[Options.FEASIBILITY_TOL]
+    )
+    options.setdefault(Options.VERBOSE.value, DEFAULT_OPTIONS[Options.VERBOSE])
+    options[Options.VERBOSE.value] = bool(options[Options.VERBOSE])
+    options.setdefault(Options.SCALE.value, DEFAULT_OPTIONS[Options.SCALE])
+    options[Options.SCALE.value] = bool(options[Options.SCALE])
+    options.setdefault(
+        Options.FILTER_SIZE.value,
+        DEFAULT_OPTIONS[Options.FILTER_SIZE],
+    )
+    options[Options.FILTER_SIZE.value] = int(options[Options.FILTER_SIZE])
+    options.setdefault(
+        Options.STORE_HISTORY.value,
+        DEFAULT_OPTIONS[Options.STORE_HISTORY],
+    )
+    options[Options.STORE_HISTORY.value] = bool(options[Options.STORE_HISTORY])
+    options.setdefault(
+        Options.HISTORY_SIZE.value,
+        DEFAULT_OPTIONS[Options.HISTORY_SIZE],
+    )
+    options[Options.HISTORY_SIZE.value] = int(options[Options.HISTORY_SIZE])
+    options.setdefault(Options.DEBUG.value, DEFAULT_OPTIONS[Options.DEBUG])
+    options[Options.DEBUG.value] = bool(options[Options.DEBUG])
+
+    # Check whether there are any unknown options.
+    for key in options:
+        if key not in Options.__members__.values():
+            warnings.warn(f"Unknown option: {key}.", RuntimeWarning, 3)
+
+
+def _set_default_constants(**kwargs):
+    """
+    Set the default constants.
+    """
+    constants = dict(kwargs)
+    constants.setdefault(
+        Constants.DECREASE_RADIUS_FACTOR.value,
+        DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_FACTOR],
+    )
+    constants[Constants.DECREASE_RADIUS_FACTOR.value] = float(
+        constants[Constants.DECREASE_RADIUS_FACTOR]
+    )
+    if (
+        constants[Constants.DECREASE_RADIUS_FACTOR] <= 0.0
+        or constants[Constants.DECREASE_RADIUS_FACTOR] >= 1.0
+    ):
+        raise ValueError(
+            "The constant decrease_radius_factor must be in the interval "
+            "(0, 1)."
+        )
+    constants.setdefault(
+        Constants.INCREASE_RADIUS_THRESHOLD.value,
+        DEFAULT_CONSTANTS[Constants.INCREASE_RADIUS_THRESHOLD],
+    )
+    constants[Constants.INCREASE_RADIUS_THRESHOLD.value] = float(
+        constants[Constants.INCREASE_RADIUS_THRESHOLD]
+    )
+    if constants[Constants.INCREASE_RADIUS_THRESHOLD] <= 1.0:
+        raise ValueError(
+            "The constant increase_radius_threshold must be greater than 1."
+        )
+    if (
+        Constants.INCREASE_RADIUS_FACTOR in constants
+        and constants[Constants.INCREASE_RADIUS_FACTOR] <= 1.0
+    ):
+        raise ValueError(
+            "The constant increase_radius_factor must be greater than 1."
+        )
+    if (
+        Constants.DECREASE_RADIUS_THRESHOLD in constants
+        and constants[Constants.DECREASE_RADIUS_THRESHOLD] <= 1.0
+    ):
+        raise ValueError(
+            "The constant decrease_radius_threshold must be greater than 1."
+ ) + if ( + Constants.INCREASE_RADIUS_FACTOR in constants + and Constants.DECREASE_RADIUS_THRESHOLD in constants + ): + if ( + constants[Constants.DECREASE_RADIUS_THRESHOLD] + >= constants[Constants.INCREASE_RADIUS_FACTOR] + ): + raise ValueError( + "The constant decrease_radius_threshold must be " + "less than increase_radius_factor." + ) + elif Constants.INCREASE_RADIUS_FACTOR in constants: + constants[Constants.DECREASE_RADIUS_THRESHOLD.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_THRESHOLD], + 0.5 * (1.0 + constants[Constants.INCREASE_RADIUS_FACTOR]), + ] + ) + elif Constants.DECREASE_RADIUS_THRESHOLD in constants: + constants[Constants.INCREASE_RADIUS_FACTOR.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.INCREASE_RADIUS_FACTOR], + 2.0 * constants[Constants.DECREASE_RADIUS_THRESHOLD], + ] + ) + else: + constants[Constants.INCREASE_RADIUS_FACTOR.value] = DEFAULT_CONSTANTS[ + Constants.INCREASE_RADIUS_FACTOR + ] + constants[Constants.DECREASE_RADIUS_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_THRESHOLD]) + constants.setdefault( + Constants.DECREASE_RESOLUTION_FACTOR.value, + DEFAULT_CONSTANTS[Constants.DECREASE_RESOLUTION_FACTOR], + ) + constants[Constants.DECREASE_RESOLUTION_FACTOR.value] = float( + constants[Constants.DECREASE_RESOLUTION_FACTOR] + ) + if ( + constants[Constants.DECREASE_RESOLUTION_FACTOR] <= 0.0 + or constants[Constants.DECREASE_RESOLUTION_FACTOR] >= 1.0 + ): + raise ValueError( + "The constant decrease_resolution_factor must be in the interval " + "(0, 1)." + ) + if ( + Constants.LARGE_RESOLUTION_THRESHOLD in constants + and constants[Constants.LARGE_RESOLUTION_THRESHOLD] <= 1.0 + ): + raise ValueError( + "The constant large_resolution_threshold must be greater than 1." + ) + if ( + Constants.MODERATE_RESOLUTION_THRESHOLD in constants + and constants[Constants.MODERATE_RESOLUTION_THRESHOLD] <= 1.0 + ): + raise ValueError( + "The constant moderate_resolution_threshold must be greater than " + "1." + ) + if ( + Constants.LARGE_RESOLUTION_THRESHOLD in constants + and Constants.MODERATE_RESOLUTION_THRESHOLD in constants + ): + if ( + constants[Constants.MODERATE_RESOLUTION_THRESHOLD] + > constants[Constants.LARGE_RESOLUTION_THRESHOLD] + ): + raise ValueError( + "The constant moderate_resolution_threshold " + "must be at most large_resolution_threshold." + ) + elif Constants.LARGE_RESOLUTION_THRESHOLD in constants: + constants[Constants.MODERATE_RESOLUTION_THRESHOLD.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.MODERATE_RESOLUTION_THRESHOLD], + constants[Constants.LARGE_RESOLUTION_THRESHOLD], + ] + ) + elif Constants.MODERATE_RESOLUTION_THRESHOLD in constants: + constants[Constants.LARGE_RESOLUTION_THRESHOLD.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.LARGE_RESOLUTION_THRESHOLD], + constants[Constants.MODERATE_RESOLUTION_THRESHOLD], + ] + ) + else: + constants[Constants.LARGE_RESOLUTION_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.LARGE_RESOLUTION_THRESHOLD] + ) + constants[Constants.MODERATE_RESOLUTION_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.MODERATE_RESOLUTION_THRESHOLD] + ) + if Constants.LOW_RATIO in constants and ( + constants[Constants.LOW_RATIO] <= 0.0 + or constants[Constants.LOW_RATIO] >= 1.0 + ): + raise ValueError( + "The constant low_ratio must be in the interval (0, 1)." 
+ ) + if Constants.HIGH_RATIO in constants and ( + constants[Constants.HIGH_RATIO] <= 0.0 + or constants[Constants.HIGH_RATIO] >= 1.0 + ): + raise ValueError( + "The constant high_ratio must be in the interval (0, 1)." + ) + if Constants.LOW_RATIO in constants and Constants.HIGH_RATIO in constants: + if constants[Constants.LOW_RATIO] > constants[Constants.HIGH_RATIO]: + raise ValueError( + "The constant low_ratio must be at most high_ratio." + ) + elif Constants.LOW_RATIO in constants: + constants[Constants.HIGH_RATIO.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.HIGH_RATIO], + constants[Constants.LOW_RATIO], + ] + ) + elif Constants.HIGH_RATIO in constants: + constants[Constants.LOW_RATIO.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.LOW_RATIO], + constants[Constants.HIGH_RATIO], + ] + ) + else: + constants[Constants.LOW_RATIO.value] = DEFAULT_CONSTANTS[ + Constants.LOW_RATIO + ] + constants[Constants.HIGH_RATIO.value] = DEFAULT_CONSTANTS[ + Constants.HIGH_RATIO + ] + constants.setdefault( + Constants.VERY_LOW_RATIO.value, + DEFAULT_CONSTANTS[Constants.VERY_LOW_RATIO], + ) + constants[Constants.VERY_LOW_RATIO.value] = float( + constants[Constants.VERY_LOW_RATIO] + ) + if ( + constants[Constants.VERY_LOW_RATIO] <= 0.0 + or constants[Constants.VERY_LOW_RATIO] >= 1.0 + ): + raise ValueError( + "The constant very_low_ratio must be in the interval (0, 1)." + ) + if ( + Constants.PENALTY_INCREASE_THRESHOLD in constants + and constants[Constants.PENALTY_INCREASE_THRESHOLD] < 1.0 + ): + raise ValueError( + "The constant penalty_increase_threshold must be " + "greater than or equal to 1." + ) + if ( + Constants.PENALTY_INCREASE_FACTOR in constants + and constants[Constants.PENALTY_INCREASE_FACTOR] <= 1.0 + ): + raise ValueError( + "The constant penalty_increase_factor must be greater than 1." + ) + if ( + Constants.PENALTY_INCREASE_THRESHOLD in constants + and Constants.PENALTY_INCREASE_FACTOR in constants + ): + if ( + constants[Constants.PENALTY_INCREASE_FACTOR] + < constants[Constants.PENALTY_INCREASE_THRESHOLD] + ): + raise ValueError( + "The constant penalty_increase_factor must be " + "greater than or equal to " + "penalty_increase_threshold." + ) + elif Constants.PENALTY_INCREASE_THRESHOLD in constants: + constants[Constants.PENALTY_INCREASE_FACTOR.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_FACTOR], + constants[Constants.PENALTY_INCREASE_THRESHOLD], + ] + ) + elif Constants.PENALTY_INCREASE_FACTOR in constants: + constants[Constants.PENALTY_INCREASE_THRESHOLD.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_THRESHOLD], + constants[Constants.PENALTY_INCREASE_FACTOR], + ] + ) + else: + constants[Constants.PENALTY_INCREASE_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_THRESHOLD] + ) + constants[Constants.PENALTY_INCREASE_FACTOR.value] = DEFAULT_CONSTANTS[ + Constants.PENALTY_INCREASE_FACTOR + ] + constants.setdefault( + Constants.SHORT_STEP_THRESHOLD.value, + DEFAULT_CONSTANTS[Constants.SHORT_STEP_THRESHOLD], + ) + constants[Constants.SHORT_STEP_THRESHOLD.value] = float( + constants[Constants.SHORT_STEP_THRESHOLD] + ) + if ( + constants[Constants.SHORT_STEP_THRESHOLD] <= 0.0 + or constants[Constants.SHORT_STEP_THRESHOLD] >= 1.0 + ): + raise ValueError( + "The constant short_step_threshold must be in the interval (0, 1)." 
+    )
+    constants.setdefault(
+        Constants.LOW_RADIUS_FACTOR.value,
+        DEFAULT_CONSTANTS[Constants.LOW_RADIUS_FACTOR],
+    )
+    constants[Constants.LOW_RADIUS_FACTOR.value] = float(
+        constants[Constants.LOW_RADIUS_FACTOR]
+    )
+    if (
+        constants[Constants.LOW_RADIUS_FACTOR] <= 0.0
+        or constants[Constants.LOW_RADIUS_FACTOR] >= 1.0
+    ):
+        raise ValueError(
+            "The constant low_radius_factor must be in the interval (0, 1)."
+        )
+    constants.setdefault(
+        Constants.BYRD_OMOJOKUN_FACTOR.value,
+        DEFAULT_CONSTANTS[Constants.BYRD_OMOJOKUN_FACTOR],
+    )
+    constants[Constants.BYRD_OMOJOKUN_FACTOR.value] = float(
+        constants[Constants.BYRD_OMOJOKUN_FACTOR]
+    )
+    if (
+        constants[Constants.BYRD_OMOJOKUN_FACTOR] <= 0.0
+        or constants[Constants.BYRD_OMOJOKUN_FACTOR] >= 1.0
+    ):
+        raise ValueError(
+            "The constant byrd_omojokun_factor must be in the interval "
+            "(0, 1)."
+        )
+    constants.setdefault(
+        Constants.THRESHOLD_RATIO_CONSTRAINTS.value,
+        DEFAULT_CONSTANTS[Constants.THRESHOLD_RATIO_CONSTRAINTS],
+    )
+    constants[Constants.THRESHOLD_RATIO_CONSTRAINTS.value] = float(
+        constants[Constants.THRESHOLD_RATIO_CONSTRAINTS]
+    )
+    if constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] <= 1.0:
+        raise ValueError(
+            "The constant threshold_ratio_constraints must be greater than 1."
+        )
+    constants.setdefault(
+        Constants.LARGE_SHIFT_FACTOR.value,
+        DEFAULT_CONSTANTS[Constants.LARGE_SHIFT_FACTOR],
+    )
+    constants[Constants.LARGE_SHIFT_FACTOR.value] = float(
+        constants[Constants.LARGE_SHIFT_FACTOR]
+    )
+    if constants[Constants.LARGE_SHIFT_FACTOR] < 0.0:
+        raise ValueError(
+            "The constant large_shift_factor must be nonnegative."
+        )
+    constants.setdefault(
+        Constants.LARGE_GRADIENT_FACTOR.value,
+        DEFAULT_CONSTANTS[Constants.LARGE_GRADIENT_FACTOR],
+    )
+    constants[Constants.LARGE_GRADIENT_FACTOR.value] = float(
+        constants[Constants.LARGE_GRADIENT_FACTOR]
+    )
+    if constants[Constants.LARGE_GRADIENT_FACTOR] <= 1.0:
+        raise ValueError(
+            "The constant large_gradient_factor must be greater than 1."
+        )
+    constants.setdefault(
+        Constants.RESOLUTION_FACTOR.value,
+        DEFAULT_CONSTANTS[Constants.RESOLUTION_FACTOR],
+    )
+    constants[Constants.RESOLUTION_FACTOR.value] = float(
+        constants[Constants.RESOLUTION_FACTOR]
+    )
+    if constants[Constants.RESOLUTION_FACTOR] <= 1.0:
+        raise ValueError(
+            "The constant resolution_factor must be greater than 1."
+        )
+    constants.setdefault(
+        Constants.IMPROVE_TCG.value,
+        DEFAULT_CONSTANTS[Constants.IMPROVE_TCG],
+    )
+    constants[Constants.IMPROVE_TCG.value] = bool(
+        constants[Constants.IMPROVE_TCG]
+    )
+
+    # Check whether there are any unknown constants.
+    for key in kwargs:
+        if key not in Constants.__members__.values():
+            warnings.warn(f"Unknown constant: {key}.", RuntimeWarning, 3)
+    return constants
+
+
+def _eval(pb, framework, step, options):
+    """
+    Evaluate the objective and constraint functions.
+    """
+    if pb.n_eval >= options[Options.MAX_EVAL]:
+        raise MaxEvalError
+    x_eval = framework.x_best + step
+    fun_val, cub_val, ceq_val = pb(x_eval)
+    r_val = pb.maxcv(x_eval, cub_val, ceq_val)
+    if (
+        fun_val <= options[Options.TARGET]
+        and r_val <= options[Options.FEASIBILITY_TOL]
+    ):
+        raise TargetSuccess
+    if pb.is_feasibility and r_val <= options[Options.FEASIBILITY_TOL]:
+        raise FeasibleSuccess
+    return fun_val, cub_val, ceq_val
+
+
+def _build_result(pb, penalty, success, status, n_iter, options):
+    """
+    Build the result of the optimization process.
+    """
+    # Build the result.
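+    # The returned point is the best evaluation recorded by the problem,
+    # selected using the given penalty parameter to weigh the objective
+    # function value against the constraint violation.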
+ x, fun, maxcv = pb.best_eval(penalty) + success = success and np.isfinite(fun) and np.isfinite(maxcv) + if status not in [ExitStatus.TARGET_SUCCESS, ExitStatus.FEASIBLE_SUCCESS]: + success = success and maxcv <= options[Options.FEASIBILITY_TOL] + result = OptimizeResult() + result.message = { + ExitStatus.RADIUS_SUCCESS: "The lower bound for the trust-region " + "radius has been reached", + ExitStatus.TARGET_SUCCESS: "The target objective function value has " + "been reached", + ExitStatus.FIXED_SUCCESS: "All variables are fixed by the bound " + "constraints", + ExitStatus.CALLBACK_SUCCESS: "The callback requested to stop the " + "optimization procedure", + ExitStatus.FEASIBLE_SUCCESS: "The feasibility problem received has " + "been solved successfully", + ExitStatus.MAX_EVAL_WARNING: "The maximum number of function " + "evaluations has been exceeded", + ExitStatus.MAX_ITER_WARNING: "The maximum number of iterations has " + "been exceeded", + ExitStatus.INFEASIBLE_ERROR: "The bound constraints are infeasible", + ExitStatus.LINALG_ERROR: "A linear algebra error occurred", + }.get(status, "Unknown exit status") + result.success = success + result.status = status.value + result.x = pb.build_x(x) + result.fun = fun + result.maxcv = maxcv + result.nfev = pb.n_eval + result.nit = n_iter + if options[Options.STORE_HISTORY]: + result.fun_history = pb.fun_history + result.maxcv_history = pb.maxcv_history + + # Print the result if requested. + if options[Options.VERBOSE]: + _print_step( + result.message, + pb, + result.x, + result.fun, + result.maxcv, + result.nfev, + result.nit, + ) + return result + + +def _print_step(message, pb, x, fun_val, r_val, n_eval, n_iter): + """ + Print information about the current state of the optimization process. + """ + print() + print(f"{message}.") + print(f"Number of function evaluations: {n_eval}.") + print(f"Number of iterations: {n_iter}.") + if not pb.is_feasibility: + print(f"Least value of {pb.fun_name}: {fun_val}.") + print(f"Maximum constraint violation: {r_val}.") + with np.printoptions(**PRINT_OPTIONS): + print(f"Corresponding point: {x}.") diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py new file mode 100644 index 0000000000000000000000000000000000000000..04ecb5c5551ccfe46c71c268c0fea0add419b840 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py @@ -0,0 +1,1525 @@ +import warnings + +import numpy as np +from scipy.linalg import eigh + +from .settings import Options +from .utils import MaxEvalError, TargetSuccess, FeasibleSuccess + + +EPS = np.finfo(float).eps + + +class Interpolation: + """ + Interpolation set. + + This class stores a base point around which the models are expanded and the + interpolation points. The coordinates of the interpolation points are + relative to the base point. + """ + + def __init__(self, pb, options): + """ + Initialize the interpolation set. + + Parameters + ---------- + pb : `cobyqa.problem.Problem` + Problem to be solved. + options : dict + Options of the solver. + """ + # Reduce the initial trust-region radius if necessary. 
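+        # The initial trust-region radius cannot exceed half of the smallest
+        # bound range, so that the initial interpolation points built below
+        # satisfy the bound constraints.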
+ self._debug = options[Options.DEBUG] + max_radius = 0.5 * np.min(pb.bounds.xu - pb.bounds.xl) + if options[Options.RHOBEG] > max_radius: + options[Options.RHOBEG.value] = max_radius + options[Options.RHOEND.value] = np.min( + [ + options[Options.RHOEND], + max_radius, + ] + ) + + # Set the initial point around which the models are expanded. + self._x_base = np.copy(pb.x0) + very_close_xl_idx = ( + self.x_base <= pb.bounds.xl + 0.5 * options[Options.RHOBEG] + ) + self.x_base[very_close_xl_idx] = pb.bounds.xl[very_close_xl_idx] + close_xl_idx = ( + pb.bounds.xl + 0.5 * options[Options.RHOBEG] < self.x_base + ) & (self.x_base <= pb.bounds.xl + options[Options.RHOBEG]) + self.x_base[close_xl_idx] = np.minimum( + pb.bounds.xl[close_xl_idx] + options[Options.RHOBEG], + pb.bounds.xu[close_xl_idx], + ) + very_close_xu_idx = ( + self.x_base >= pb.bounds.xu - 0.5 * options[Options.RHOBEG] + ) + self.x_base[very_close_xu_idx] = pb.bounds.xu[very_close_xu_idx] + close_xu_idx = ( + self.x_base < pb.bounds.xu - 0.5 * options[Options.RHOBEG] + ) & (pb.bounds.xu - options[Options.RHOBEG] <= self.x_base) + self.x_base[close_xu_idx] = np.maximum( + pb.bounds.xu[close_xu_idx] - options[Options.RHOBEG], + pb.bounds.xl[close_xu_idx], + ) + + # Set the initial interpolation set. + self._xpt = np.zeros((pb.n, options[Options.NPT])) + for k in range(1, options[Options.NPT]): + if k <= pb.n: + if very_close_xu_idx[k - 1]: + self.xpt[k - 1, k] = -options[Options.RHOBEG] + else: + self.xpt[k - 1, k] = options[Options.RHOBEG] + elif k <= 2 * pb.n: + if very_close_xl_idx[k - pb.n - 1]: + self.xpt[k - pb.n - 1, k] = 2.0 * options[Options.RHOBEG] + elif very_close_xu_idx[k - pb.n - 1]: + self.xpt[k - pb.n - 1, k] = -2.0 * options[Options.RHOBEG] + else: + self.xpt[k - pb.n - 1, k] = -options[Options.RHOBEG] + else: + spread = (k - pb.n - 1) // pb.n + k1 = k - (1 + spread) * pb.n - 1 + k2 = (k1 + spread) % pb.n + self.xpt[k1, k] = self.xpt[k1, k1 + 1] + self.xpt[k2, k] = self.xpt[k2, k2 + 1] + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self.xpt.shape[0] + + @property + def npt(self): + """ + Number of interpolation points. + + Returns + ------- + int + Number of interpolation points. + """ + return self.xpt.shape[1] + + @property + def xpt(self): + """ + Interpolation points. + + Returns + ------- + `numpy.ndarray`, shape (n, npt) + Interpolation points. + """ + return self._xpt + + @xpt.setter + def xpt(self, xpt): + """ + Set the interpolation points. + + Parameters + ---------- + xpt : `numpy.ndarray`, shape (n, npt) + New interpolation points. + """ + if self._debug: + assert xpt.shape == ( + self.n, + self.npt, + ), "The shape of `xpt` is not valid." + self._xpt = xpt + + @property + def x_base(self): + """ + Base point around which the models are expanded. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Base point around which the models are expanded. + """ + return self._x_base + + @x_base.setter + def x_base(self, x_base): + """ + Set the base point around which the models are expanded. + + Parameters + ---------- + x_base : `numpy.ndarray`, shape (n,) + New base point around which the models are expanded. + """ + if self._debug: + assert x_base.shape == ( + self.n, + ), "The shape of `x_base` is not valid." + self._x_base = x_base + + def point(self, k): + """ + Get the `k`-th interpolation point. + + The return point is relative to the origin. + + Parameters + ---------- + k : int + Index of the interpolation point. 
+
+        Returns
+        -------
+        `numpy.ndarray`, shape (n,)
+            `k`-th interpolation point.
+        """
+        if self._debug:
+            assert 0 <= k < self.npt, "The index `k` is not valid."
+        return self.x_base + self.xpt[:, k]
+
+
+_cache = {"xpt": None, "a": None, "right_scaling": None, "eigh": None}
+
+
+def build_system(interpolation):
+    """
+    Build the left-hand side matrix of the interpolation system. The
+    matrix built below stores ``W * diag(right_scaling)``, where ``W`` is
+    the theoretical matrix of the interpolation system. The right scaling
+    matrix is chosen to keep the elements of the matrix well-balanced.
+
+    Parameters
+    ----------
+    interpolation : `cobyqa.models.Interpolation`
+        Interpolation set.
+
+    Returns
+    -------
+    `numpy.ndarray`, shape (npt + n + 1, npt + n + 1)
+        Scaled left-hand side matrix of the interpolation system.
+    `numpy.ndarray`, shape (npt + n + 1,)
+        Diagonal of the right scaling matrix.
+    tuple
+        Eigenvalues and eigenvectors of the scaled matrix.
+    """
+    # Return the cached system if the interpolation points are unchanged.
+    if _cache["xpt"] is not None and np.array_equal(
+        interpolation.xpt, _cache["xpt"]
+    ):
+        return _cache["a"], _cache["right_scaling"], _cache["eigh"]
+
+    # Compute the scaled directions from the base point to the
+    # interpolation points. We scale the directions to avoid numerical
+    # difficulties.
+    scale = np.max(np.linalg.norm(interpolation.xpt, axis=0), initial=EPS)
+    xpt_scale = interpolation.xpt / scale
+
+    n, npt = xpt_scale.shape
+    a = np.zeros((npt + n + 1, npt + n + 1))
+    a[:npt, :npt] = 0.5 * (xpt_scale.T @ xpt_scale) ** 2.0
+    a[:npt, npt] = 1.0
+    a[:npt, npt + 1:] = xpt_scale.T
+    a[npt, :npt] = 1.0
+    a[npt + 1:, :npt] = xpt_scale
+
+    # Build the left and right scaling diagonal matrices.
+    right_scaling = np.empty(npt + n + 1)
+    right_scaling[:npt] = 1.0 / scale**2.0
+    right_scaling[npt] = scale**2.0
+    right_scaling[npt + 1:] = scale
+
+    eig_values, eig_vectors = eigh(a, check_finite=False)
+
+    _cache["xpt"] = np.copy(interpolation.xpt)
+    _cache["a"] = np.copy(a)
+    _cache["right_scaling"] = np.copy(right_scaling)
+    _cache["eigh"] = (eig_values, eig_vectors)
+
+    return a, right_scaling, (eig_values, eig_vectors)
+
+
+class Quadratic:
+    """
+    Quadratic model.
+
+    This class stores the Hessian matrix of the quadratic model using the
+    implicit/explicit representation designed by Powell for NEWUOA [1]_.
+
+    References
+    ----------
+    .. [1] M. J. D. Powell. The NEWUOA software for unconstrained
+       optimization without derivatives. In G. Di Pillo and M. Roma,
+       editors, *Large-Scale Nonlinear Optimization*, volume 83 of
+       Nonconvex Optim. Appl., pages 255--297. Springer, Boston, MA, USA,
+       2006. `doi:10.1007/0-387-30065-1_16
+       <https://doi.org/10.1007/0-387-30065-1_16>`_.
+    """
+
+    def __init__(self, interpolation, values, debug):
+        """
+        Initialize the quadratic model.
+
+        Parameters
+        ----------
+        interpolation : `cobyqa.models.Interpolation`
+            Interpolation set.
+        values : `numpy.ndarray`, shape (npt,)
+            Values of the interpolated function at the interpolation points.
+        debug : bool
+            Whether to make debugging tests during the execution.
+
+        Raises
+        ------
+        `numpy.linalg.LinAlgError`
+            If the interpolation system is ill-defined.
+        """
+        self._debug = debug
+        if self._debug:
+            assert values.shape == (
+                interpolation.npt,
+            ), "The shape of `values` is not valid."
+        if interpolation.npt < interpolation.n + 1:
+            raise ValueError(
+                f"The number of interpolation points must be at least "
+                f"{interpolation.n + 1}."
+            )
+        self._const, self._grad, self._i_hess, _ = self._get_model(
+            interpolation,
+            values,
+        )
+        self._e_hess = np.zeros((self.n, self.n))
+
+    def __call__(self, x, interpolation):
+        """
+        Evaluate the quadratic model at a given point.
+
+        Parameters
+        ----------
+        x : `numpy.ndarray`, shape (n,)
+            Point at which the quadratic model is evaluated.
+        interpolation : `cobyqa.models.Interpolation`
+            Interpolation set.
+ + Returns + ------- + float + Value of the quadratic model at `x`. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + x_diff = x - interpolation.x_base + return ( + self._const + + self._grad @ x_diff + + 0.5 + * ( + self._i_hess @ (interpolation.xpt.T @ x_diff) ** 2.0 + + x_diff @ self._e_hess @ x_diff + ) + ) + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self._grad.size + + @property + def npt(self): + """ + Number of interpolation points used to define the quadratic model. + + Returns + ------- + int + Number of interpolation points used to define the quadratic model. + """ + return self._i_hess.size + + def grad(self, x, interpolation): + """ + Evaluate the gradient of the quadratic model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the gradient of the quadratic model is evaluated. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Gradient of the quadratic model at `x`. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + x_diff = x - interpolation.x_base + return self._grad + self.hess_prod(x_diff, interpolation) + + def hess(self, interpolation): + """ + Evaluate the Hessian matrix of the quadratic model. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + `numpy.ndarray`, shape (n, n) + Hessian matrix of the quadratic model. + """ + return self._e_hess + interpolation.xpt @ ( + self._i_hess[:, np.newaxis] * interpolation.xpt.T + ) + + def hess_prod(self, v, interpolation): + """ + Evaluate the right product of the Hessian matrix of the quadratic model + with a given vector. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Vector with which the Hessian matrix of the quadratic model is + multiplied from the right. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Right product of the Hessian matrix of the quadratic model with + `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + return self._e_hess @ v + interpolation.xpt @ ( + self._i_hess * (interpolation.xpt.T @ v) + ) + + def curv(self, v, interpolation): + """ + Evaluate the curvature of the quadratic model along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the quadratic model is + evaluated. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + float + Curvature of the quadratic model along `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + return ( + v @ self._e_hess @ v + + self._i_hess @ (interpolation.xpt.T @ v) ** 2.0 + ) + + def update(self, interpolation, k_new, dir_old, values_diff): + """ + Update the quadratic model. + + This method applies the derivative-free symmetric Broyden update to the + quadratic model. The `knew`-th interpolation point must be updated + before calling this method. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Updated interpolation set. + k_new : int + Index of the updated interpolation point. + dir_old : `numpy.ndarray`, shape (n,) + Value of ``interpolation.xpt[:, k_new]`` before the update. 
+ values_diff : `numpy.ndarray`, shape (npt,) + Differences between the values of the interpolated nonlinear + function and the previous quadratic model at the updated + interpolation points. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + """ + if self._debug: + assert 0 <= k_new < self.npt, "The index `k_new` is not valid." + assert dir_old.shape == ( + self.n, + ), "The shape of `dir_old` is not valid." + assert values_diff.shape == ( + self.npt, + ), "The shape of `values_diff` is not valid." + + # Forward the k_new-th element of the implicit Hessian matrix to the + # explicit Hessian matrix. This must be done because the implicit + # Hessian matrix is related to the interpolation points, and the + # k_new-th interpolation point is modified. + self._e_hess += self._i_hess[k_new] * np.outer(dir_old, dir_old) + self._i_hess[k_new] = 0.0 + + # Update the quadratic model. + const, grad, i_hess, ill_conditioned = self._get_model( + interpolation, + values_diff, + ) + self._const += const + self._grad += grad + self._i_hess += i_hess + return ill_conditioned + + def shift_x_base(self, interpolation, new_x_base): + """ + Shift the point around which the quadratic model is defined. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Previous interpolation set. + new_x_base : `numpy.ndarray`, shape (n,) + Point that will replace ``interpolation.x_base``. + """ + if self._debug: + assert new_x_base.shape == ( + self.n, + ), "The shape of `new_x_base` is not valid." + self._const = self(new_x_base, interpolation) + self._grad = self.grad(new_x_base, interpolation) + shift = new_x_base - interpolation.x_base + update = np.outer( + shift, + (interpolation.xpt - 0.5 * shift[:, np.newaxis]) @ self._i_hess, + ) + self._e_hess += update + update.T + + @staticmethod + def solve_systems(interpolation, rhs): + """ + Solve the interpolation systems. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + rhs : `numpy.ndarray`, shape (npt + n + 1, m) + Right-hand side vectors of the ``m`` interpolation systems. + + Returns + ------- + `numpy.ndarray`, shape (npt + n + 1, m) + Solutions of the interpolation systems. + `numpy.ndarray`, shape (m, ) + Whether the interpolation systems are ill-conditioned. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation systems are ill-defined. + """ + n, npt = interpolation.xpt.shape + assert ( + rhs.ndim == 2 and rhs.shape[0] == npt + n + 1 + ), "The shape of `rhs` is not valid." + + # Build the left-hand side matrix of the interpolation system. The + # matrix below stores diag(left_scaling) * W * diag(right_scaling), + # where W is the theoretical matrix of the interpolation system. The + # left and right scaling matrices are chosen to keep the elements in + # the matrix well-balanced. + a, right_scaling, eig = build_system(interpolation) + + # Build the solution. After a discussion with Mike Saunders and Alexis + # Montoison during their visit to the Hong Kong Polytechnic University + # in 2024, we decided to use the eigendecomposition of the symmetric + # matrix a. This is more stable than the previously employed LBL + # decomposition, and allows us to directly detect ill-conditioning of + # the system and to build the least-squares solution if necessary. + # Numerical experiments have shown that this strategy improves the + # performance of the solver. 
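+        # Concretely, writing the symmetric matrix as A = V diag(w) V^T with
+        # V orthonormal, the (least-squares) solution of A y = b is computed
+        # below as y = V diag(1/w) V^T b, where the eigenvalues w with
+        # |w| <= EPS are discarded; the system is flagged as ill-conditioned
+        # whenever any eigenvalue is discarded.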
+        rhs_scaled = rhs * right_scaling[:, np.newaxis]
+        if not (np.all(np.isfinite(a)) and np.all(np.isfinite(rhs_scaled))):
+            raise np.linalg.LinAlgError(
+                "The interpolation system is ill-defined."
+            )
+
+        # Retrieve the eigendecomposition of `a`, computed in `build_system`.
+        eig_values, eig_vectors = eig
+
+        large_eig_values = np.abs(eig_values) > EPS
+        eig_vectors = eig_vectors[:, large_eig_values]
+        inv_eig_values = 1.0 / eig_values[large_eig_values]
+        ill_conditioned = ~np.all(large_eig_values, 0)
+        left_scaled_solutions = eig_vectors @ (
+            (eig_vectors.T @ rhs_scaled) * inv_eig_values[:, np.newaxis]
+        )
+        return (
+            left_scaled_solutions * right_scaling[:, np.newaxis],
+            ill_conditioned,
+        )
+
+    @staticmethod
+    def _get_model(interpolation, values):
+        """
+        Solve the interpolation system.
+
+        Parameters
+        ----------
+        interpolation : `cobyqa.models.Interpolation`
+            Interpolation set.
+        values : `numpy.ndarray`, shape (npt,)
+            Values of the interpolated function at the interpolation points.
+
+        Returns
+        -------
+        float
+            Constant term of the quadratic model.
+        `numpy.ndarray`, shape (n,)
+            Gradient of the quadratic model at ``interpolation.x_base``.
+        `numpy.ndarray`, shape (npt,)
+            Implicit Hessian matrix of the quadratic model.
+        bool
+            Whether the interpolation system is ill-conditioned.
+
+        Raises
+        ------
+        `numpy.linalg.LinAlgError`
+            If the interpolation system is ill-defined.
+        """
+        assert values.shape == (
+            interpolation.npt,
+        ), "The shape of `values` is not valid."
+        n, npt = interpolation.xpt.shape
+        x, ill_conditioned = Quadratic.solve_systems(
+            interpolation,
+            np.block(
+                [
+                    [
+                        values,
+                        np.zeros(n + 1),
+                    ]
+                ]
+            ).T,
+        )
+        return x[npt, 0], x[npt + 1:, 0], x[:npt, 0], ill_conditioned
+
+
+class Models:
+    """
+    Models for a nonlinear optimization problem.
+    """
+
+    def __init__(self, pb, options):
+        """
+        Initialize the models.
+
+        Parameters
+        ----------
+        pb : `cobyqa.problem.Problem`
+            Problem to be solved.
+        options : dict
+            Options of the solver.
+
+        Raises
+        ------
+        `cobyqa.utils.MaxEvalError`
+            If the maximum number of evaluations is reached.
+        `cobyqa.utils.TargetSuccess`
+            If a nearly feasible point has been found with an objective
+            function value below the target.
+        `cobyqa.utils.FeasibleSuccess`
+            If a feasible point has been found for a feasibility problem.
+        `numpy.linalg.LinAlgError`
+            If the interpolation system is ill-defined.
+        """
+        # Set the initial interpolation set.
+        self._debug = options[Options.DEBUG]
+        self._interpolation = Interpolation(pb, options)
+
+        # Evaluate the nonlinear functions at the initial interpolation points.
+        x_eval = self.interpolation.point(0)
+        fun_init, cub_init, ceq_init = pb(x_eval)
+        self._fun_val = np.full(options[Options.NPT], np.nan)
+        self._cub_val = np.full((options[Options.NPT], cub_init.size), np.nan)
+        self._ceq_val = np.full((options[Options.NPT], ceq_init.size), np.nan)
+        for k in range(options[Options.NPT]):
+            if k >= options[Options.MAX_EVAL]:
+                raise MaxEvalError
+            if k == 0:
+                self.fun_val[k] = fun_init
+                self.cub_val[k, :] = cub_init
+                self.ceq_val[k, :] = ceq_init
+            else:
+                x_eval = self.interpolation.point(k)
+                self.fun_val[k], self.cub_val[k, :], self.ceq_val[k, :] = pb(
+                    x_eval
+                )
+
+            # Stop the iterations if the problem is a feasibility problem and
+            # the current interpolation point is feasible.
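+            # The FeasibleSuccess exception propagates to the caller, which
+            # builds the final result from the best point evaluated so far
+            # (see the handling of ExitStatus.FEASIBLE_SUCCESS above).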
+ if ( + pb.is_feasibility + and pb.maxcv( + self.interpolation.point(k), + self.cub_val[k, :], + self.ceq_val[k, :], + ) + <= options[Options.FEASIBILITY_TOL] + ): + raise FeasibleSuccess + + # Stop the iterations if the current interpolation point is nearly + # feasible and has an objective function value below the target. + if ( + self._fun_val[k] <= options[Options.TARGET] + and pb.maxcv( + self.interpolation.point(k), + self.cub_val[k, :], + self.ceq_val[k, :], + ) + <= options[Options.FEASIBILITY_TOL] + ): + raise TargetSuccess + + # Build the initial quadratic models. + self._fun = Quadratic( + self.interpolation, + self._fun_val, + options[Options.DEBUG], + ) + self._cub = np.empty(self.m_nonlinear_ub, dtype=Quadratic) + self._ceq = np.empty(self.m_nonlinear_eq, dtype=Quadratic) + for i in range(self.m_nonlinear_ub): + self._cub[i] = Quadratic( + self.interpolation, + self.cub_val[:, i], + options[Options.DEBUG], + ) + for i in range(self.m_nonlinear_eq): + self._ceq[i] = Quadratic( + self.interpolation, + self.ceq_val[:, i], + options[Options.DEBUG], + ) + if self._debug: + self._check_interpolation_conditions() + + @property + def n(self): + """ + Dimension of the problem. + + Returns + ------- + int + Dimension of the problem. + """ + return self.interpolation.n + + @property + def npt(self): + """ + Number of interpolation points. + + Returns + ------- + int + Number of interpolation points. + """ + return self.interpolation.npt + + @property + def m_nonlinear_ub(self): + """ + Number of nonlinear inequality constraints. + + Returns + ------- + int + Number of nonlinear inequality constraints. + """ + return self.cub_val.shape[1] + + @property + def m_nonlinear_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + """ + return self.ceq_val.shape[1] + + @property + def interpolation(self): + """ + Interpolation set. + + Returns + ------- + `cobyqa.models.Interpolation` + Interpolation set. + """ + return self._interpolation + + @property + def fun_val(self): + """ + Values of the objective function at the interpolation points. + + Returns + ------- + `numpy.ndarray`, shape (npt,) + Values of the objective function at the interpolation points. + """ + return self._fun_val + + @property + def cub_val(self): + """ + Values of the nonlinear inequality constraint functions at the + interpolation points. + + Returns + ------- + `numpy.ndarray`, shape (npt, m_nonlinear_ub) + Values of the nonlinear inequality constraint functions at the + interpolation points. + """ + return self._cub_val + + @property + def ceq_val(self): + """ + Values of the nonlinear equality constraint functions at the + interpolation points. + + Returns + ------- + `numpy.ndarray`, shape (npt, m_nonlinear_eq) + Values of the nonlinear equality constraint functions at the + interpolation points. + """ + return self._ceq_val + + def fun(self, x): + """ + Evaluate the quadratic model of the objective function at a given + point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which to evaluate the quadratic model of the objective + function. + + Returns + ------- + float + Value of the quadratic model of the objective function at `x`. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + return self._fun(x, self.interpolation) + + def fun_grad(self, x): + """ + Evaluate the gradient of the quadratic model of the objective function + at a given point. 
+ + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which to evaluate the gradient of the quadratic model of + the objective function. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Gradient of the quadratic model of the objective function at `x`. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + return self._fun.grad(x, self.interpolation) + + def fun_hess(self): + """ + Evaluate the Hessian matrix of the quadratic model of the objective + function. + + Returns + ------- + `numpy.ndarray`, shape (n, n) + Hessian matrix of the quadratic model of the objective function. + """ + return self._fun.hess(self.interpolation) + + def fun_hess_prod(self, v): + """ + Evaluate the right product of the Hessian matrix of the quadratic model + of the objective function with a given vector. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Vector with which the Hessian matrix of the quadratic model of the + objective function is multiplied from the right. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Right product of the Hessian matrix of the quadratic model of the + objective function with `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + return self._fun.hess_prod(v, self.interpolation) + + def fun_curv(self, v): + """ + Evaluate the curvature of the quadratic model of the objective function + along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the quadratic model of the + objective function is evaluated. + + Returns + ------- + float + Curvature of the quadratic model of the objective function along + `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + return self._fun.curv(v, self.interpolation) + + def fun_alt_grad(self, x): + """ + Evaluate the gradient of the alternative quadratic model of the + objective function at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which to evaluate the gradient of the alternative + quadratic model of the objective function. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Gradient of the alternative quadratic model of the objective + function at `x`. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + model = Quadratic(self.interpolation, self.fun_val, self._debug) + return model.grad(x, self.interpolation) + + def cub(self, x, mask=None): + """ + Evaluate the quadratic models of the nonlinear inequality functions at + a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which to evaluate the quadratic models of the nonlinear + inequality functions. + mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional + Mask of the quadratic models to consider. + + Returns + ------- + `numpy.ndarray` + Values of the quadratic model of the nonlinear inequality + functions. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + assert mask is None or mask.shape == ( + self.m_nonlinear_ub, + ), "The shape of `mask` is not valid." + return np.array( + [model(x, self.interpolation) for model in self._get_cub(mask)] + ) + + def cub_grad(self, x, mask=None): + """ + Evaluate the gradients of the quadratic models of the nonlinear + inequality functions at a given point. 
+
+        Parameters
+        ----------
+        x : `numpy.ndarray`, shape (n,)
+            Point at which to evaluate the gradients of the quadratic models of
+            the nonlinear inequality functions.
+        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
+            Mask of the quadratic models to consider.
+
+        Returns
+        -------
+        `numpy.ndarray`
+            Gradients of the quadratic model of the nonlinear inequality
+            functions.
+        """
+        if self._debug:
+            assert x.shape == (self.n,), "The shape of `x` is not valid."
+            assert mask is None or mask.shape == (
+                self.m_nonlinear_ub,
+            ), "The shape of `mask` is not valid."
+        return np.reshape(
+            [model.grad(x, self.interpolation)
+             for model in self._get_cub(mask)],
+            (-1, self.n),
+        )
+
+    def cub_hess(self, mask=None):
+        """
+        Evaluate the Hessian matrices of the quadratic models of the nonlinear
+        inequality functions.
+
+        Parameters
+        ----------
+        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
+            Mask of the quadratic models to consider.
+
+        Returns
+        -------
+        `numpy.ndarray`
+            Hessian matrices of the quadratic models of the nonlinear
+            inequality functions.
+        """
+        if self._debug:
+            assert mask is None or mask.shape == (
+                self.m_nonlinear_ub,
+            ), "The shape of `mask` is not valid."
+        return np.reshape(
+            [model.hess(self.interpolation) for model in self._get_cub(mask)],
+            (-1, self.n, self.n),
+        )
+
+    def cub_hess_prod(self, v, mask=None):
+        """
+        Evaluate the right product of the Hessian matrices of the quadratic
+        models of the nonlinear inequality functions with a given vector.
+
+        Parameters
+        ----------
+        v : `numpy.ndarray`, shape (n,)
+            Vector with which the Hessian matrices of the quadratic models of
+            the nonlinear inequality functions are multiplied from the right.
+        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
+            Mask of the quadratic models to consider.
+
+        Returns
+        -------
+        `numpy.ndarray`
+            Right products of the Hessian matrices of the quadratic models of
+            the nonlinear inequality functions with `v`.
+        """
+        if self._debug:
+            assert v.shape == (self.n,), "The shape of `v` is not valid."
+            assert mask is None or mask.shape == (
+                self.m_nonlinear_ub,
+            ), "The shape of `mask` is not valid."
+        return np.reshape(
+            [
+                model.hess_prod(v, self.interpolation)
+                for model in self._get_cub(mask)
+            ],
+            (-1, self.n),
+        )
+
+    def cub_curv(self, v, mask=None):
+        """
+        Evaluate the curvature of the quadratic models of the nonlinear
+        inequality functions along a given direction.
+
+        Parameters
+        ----------
+        v : `numpy.ndarray`, shape (n,)
+            Direction along which the curvature of the quadratic models of the
+            nonlinear inequality functions is evaluated.
+        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
+            Mask of the quadratic models to consider.
+
+        Returns
+        -------
+        `numpy.ndarray`
+            Curvature of the quadratic models of the nonlinear inequality
+            functions along `v`.
+        """
+        if self._debug:
+            assert v.shape == (self.n,), "The shape of `v` is not valid."
+            assert mask is None or mask.shape == (
+                self.m_nonlinear_ub,
+            ), "The shape of `mask` is not valid."
+        return np.array(
+            [model.curv(v, self.interpolation)
+             for model in self._get_cub(mask)]
+        )
+
+    def ceq(self, x, mask=None):
+        """
+        Evaluate the quadratic models of the nonlinear equality functions at a
+        given point.
+
+        Parameters
+        ----------
+        x : `numpy.ndarray`, shape (n,)
+            Point at which to evaluate the quadratic models of the nonlinear
+            equality functions.
+        mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
+            Mask of the quadratic models to consider.
+ + Returns + ------- + `numpy.ndarray` + Values of the quadratic model of the nonlinear equality functions. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + assert mask is None or mask.shape == ( + self.m_nonlinear_eq, + ), "The shape of `mask` is not valid." + return np.array( + [model(x, self.interpolation) for model in self._get_ceq(mask)] + ) + + def ceq_grad(self, x, mask=None): + """ + Evaluate the gradients of the quadratic models of the nonlinear + equality functions at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which to evaluate the gradients of the quadratic models of + the nonlinear equality functions. + mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Mask of the quadratic models to consider. + + Returns + ------- + `numpy.ndarray` + Gradients of the quadratic model of the nonlinear equality + functions. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + assert mask is None or mask.shape == ( + self.m_nonlinear_eq, + ), "The shape of `mask` is not valid." + return np.reshape( + [model.grad(x, self.interpolation) + for model in self._get_ceq(mask)], + (-1, self.n), + ) + + def ceq_hess(self, mask=None): + """ + Evaluate the Hessian matrices of the quadratic models of the nonlinear + equality functions. + + Parameters + ---------- + mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Mask of the quadratic models to consider. + + Returns + ------- + `numpy.ndarray` + Hessian matrices of the quadratic models of the nonlinear equality + functions. + """ + if self._debug: + assert mask is None or mask.shape == ( + self.m_nonlinear_eq, + ), "The shape of `mask` is not valid." + return np.reshape( + [model.hess(self.interpolation) for model in self._get_ceq(mask)], + (-1, self.n, self.n), + ) + + def ceq_hess_prod(self, v, mask=None): + """ + Evaluate the right product of the Hessian matrices of the quadratic + models of the nonlinear equality functions with a given vector. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Vector with which the Hessian matrices of the quadratic models of + the nonlinear equality functions are multiplied from the right. + mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Mask of the quadratic models to consider. + + Returns + ------- + `numpy.ndarray` + Right products of the Hessian matrices of the quadratic models of + the nonlinear equality functions with `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + assert mask is None or mask.shape == ( + self.m_nonlinear_eq, + ), "The shape of `mask` is not valid." + return np.reshape( + [ + model.hess_prod(v, self.interpolation) + for model in self._get_ceq(mask) + ], + (-1, self.n), + ) + + def ceq_curv(self, v, mask=None): + """ + Evaluate the curvature of the quadratic models of the nonlinear + equality functions along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the quadratic models of the + nonlinear equality functions is evaluated. + mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Mask of the quadratic models to consider. + + Returns + ------- + `numpy.ndarray` + Curvature of the quadratic models of the nonlinear equality + functions along `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." 
+            assert mask is None or mask.shape == (
+                self.m_nonlinear_eq,
+            ), "The shape of `mask` is not valid."
+        return np.array(
+            [model.curv(v, self.interpolation)
+             for model in self._get_ceq(mask)]
+        )
+
+    def reset_models(self):
+        """
+        Set the quadratic models of the objective function, nonlinear
+        inequality constraints, and nonlinear equality constraints to the
+        alternative quadratic models.
+
+        Raises
+        ------
+        `numpy.linalg.LinAlgError`
+            If the interpolation system is ill-defined.
+        """
+        self._fun = Quadratic(self.interpolation, self.fun_val, self._debug)
+        for i in range(self.m_nonlinear_ub):
+            self._cub[i] = Quadratic(
+                self.interpolation,
+                self.cub_val[:, i],
+                self._debug,
+            )
+        for i in range(self.m_nonlinear_eq):
+            self._ceq[i] = Quadratic(
+                self.interpolation,
+                self.ceq_val[:, i],
+                self._debug,
+            )
+        if self._debug:
+            self._check_interpolation_conditions()
+
+    def update_interpolation(self, k_new, x_new, fun_val, cub_val, ceq_val):
+        """
+        Update the interpolation set.
+
+        This method updates the interpolation set by replacing the `k_new`-th
+        interpolation point with `x_new`. It also updates the function values
+        and the quadratic models.
+
+        Parameters
+        ----------
+        k_new : int
+            Index of the updated interpolation point.
+        x_new : `numpy.ndarray`, shape (n,)
+            New interpolation point. Its value is interpreted as relative to
+            the origin, not the base point.
+        fun_val : float
+            Value of the objective function at `x_new`.
+        cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,)
+            Values of the nonlinear inequality constraints at `x_new`.
+        ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,)
+            Values of the nonlinear equality constraints at `x_new`.
+
+        Raises
+        ------
+        `numpy.linalg.LinAlgError`
+            If the interpolation system is ill-defined.
+        """
+        if self._debug:
+            assert 0 <= k_new < self.npt, "The index `k_new` is not valid."
+            assert x_new.shape == (self.n,), \
+                "The shape of `x_new` is not valid."
+            assert isinstance(fun_val, float), \
+                "The function value is not valid."
+            assert cub_val.shape == (
+                self.m_nonlinear_ub,
+            ), "The shape of `cub_val` is not valid."
+            assert ceq_val.shape == (
+                self.m_nonlinear_eq,
+            ), "The shape of `ceq_val` is not valid."
+
+        # Compute the updates in the interpolation conditions.
+        fun_diff = np.zeros(self.npt)
+        cub_diff = np.zeros(self.cub_val.shape)
+        ceq_diff = np.zeros(self.ceq_val.shape)
+        fun_diff[k_new] = fun_val - self.fun(x_new)
+        cub_diff[k_new, :] = cub_val - self.cub(x_new)
+        ceq_diff[k_new, :] = ceq_val - self.ceq(x_new)
+
+        # Update the function values.
+        self.fun_val[k_new] = fun_val
+        self.cub_val[k_new, :] = cub_val
+        self.ceq_val[k_new, :] = ceq_val
+
+        # Update the interpolation set.
+        dir_old = np.copy(self.interpolation.xpt[:, k_new])
+        self.interpolation.xpt[:, k_new] = x_new - self.interpolation.x_base
+
+        # Update the quadratic models.
+        ill_conditioned = self._fun.update(
+            self.interpolation,
+            k_new,
+            dir_old,
+            fun_diff,
+        )
+        for i in range(self.m_nonlinear_ub):
+            ill_conditioned = ill_conditioned or self._cub[i].update(
+                self.interpolation,
+                k_new,
+                dir_old,
+                cub_diff[:, i],
+            )
+        for i in range(self.m_nonlinear_eq):
+            ill_conditioned = ill_conditioned or self._ceq[i].update(
+                self.interpolation,
+                k_new,
+                dir_old,
+                ceq_diff[:, i],
+            )
+        if self._debug:
+            self._check_interpolation_conditions()
+        return ill_conditioned
+
+    def determinants(self, x_new, k_new=None):
+        """
+        Compute the normalized determinants of the new interpolation systems.
+ + Parameters + ---------- + x_new : `numpy.ndarray`, shape (n,) + New interpolation point. Its value is interpreted as relative to + the origin, not the base point. + k_new : int, optional + Index of the updated interpolation point. If `k_new` is not + specified, all the possible determinants are computed. + + Returns + ------- + {float, `numpy.ndarray`, shape (npt,)} + Determinant(s) of the new interpolation system. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + + Notes + ----- + The determinants are normalized by the determinant of the current + interpolation system. For stability reasons, the calculations are done + using the formula (2.12) in [1]_. + + References + ---------- + .. [1] M. J. D. Powell. On updating the inverse of a KKT matrix. + Technical Report DAMTP 2004/NA01, Department of Applied Mathematics + and Theoretical Physics, University of Cambridge, Cambridge, UK, + 2004. + """ + if self._debug: + assert x_new.shape == (self.n,), \ + "The shape of `x_new` is not valid." + assert ( + k_new is None or 0 <= k_new < self.npt + ), "The index `k_new` is not valid." + + # Compute the values independent of k_new. + shift = x_new - self.interpolation.x_base + new_col = np.empty((self.npt + self.n + 1, 1)) + new_col[: self.npt, 0] = ( + 0.5 * (self.interpolation.xpt.T @ shift) ** 2.0) + new_col[self.npt, 0] = 1.0 + new_col[self.npt + 1:, 0] = shift + inv_new_col = Quadratic.solve_systems(self.interpolation, new_col)[0] + beta = 0.5 * (shift @ shift) ** 2.0 - new_col[:, 0] @ inv_new_col[:, 0] + + # Compute the values that depend on k. + if k_new is None: + coord_vec = np.eye(self.npt + self.n + 1, self.npt) + alpha = np.diag( + Quadratic.solve_systems( + self.interpolation, + coord_vec, + )[0] + ) + tau = inv_new_col[: self.npt, 0] + else: + coord_vec = np.eye(self.npt + self.n + 1, 1, -k_new) + alpha = Quadratic.solve_systems( + self.interpolation, + coord_vec, + )[ + 0 + ][k_new, 0] + tau = inv_new_col[k_new, 0] + return alpha * beta + tau**2.0 + + def shift_x_base(self, new_x_base, options): + """ + Shift the base point without changing the interpolation set. + + Parameters + ---------- + new_x_base : `numpy.ndarray`, shape (n,) + New base point. + options : dict + Options of the solver. + """ + if self._debug: + assert new_x_base.shape == ( + self.n, + ), "The shape of `new_x_base` is not valid." + + # Update the models. + self._fun.shift_x_base(self.interpolation, new_x_base) + for model in self._cub: + model.shift_x_base(self.interpolation, new_x_base) + for model in self._ceq: + model.shift_x_base(self.interpolation, new_x_base) + + # Update the base point and the interpolation points. + shift = new_x_base - self.interpolation.x_base + self.interpolation.x_base += shift + self.interpolation.xpt -= shift[:, np.newaxis] + if options[Options.DEBUG]: + self._check_interpolation_conditions() + + def _get_cub(self, mask=None): + """ + Get the quadratic models of the nonlinear inequality constraints. + + Parameters + ---------- + mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional + Mask of the quadratic models to return. + + Returns + ------- + `numpy.ndarray` + Quadratic models of the nonlinear inequality constraints. + """ + return self._cub if mask is None else self._cub[mask] + + def _get_ceq(self, mask=None): + """ + Get the quadratic models of the nonlinear equality constraints. + + Parameters + ---------- + mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Mask of the quadratic models to return. 
+ + Returns + ------- + `numpy.ndarray` + Quadratic models of the nonlinear equality constraints. + """ + return self._ceq if mask is None else self._ceq[mask] + + def _check_interpolation_conditions(self): + """ + Check the interpolation conditions of all quadratic models. + """ + error_fun = 0.0 + error_cub = 0.0 + error_ceq = 0.0 + for k in range(self.npt): + error_fun = np.max( + [ + error_fun, + np.abs( + self.fun(self.interpolation.point(k)) - self.fun_val[k] + ), + ] + ) + error_cub = np.max( + np.abs( + self.cub(self.interpolation.point(k)) - self.cub_val[k, :] + ), + initial=error_cub, + ) + error_ceq = np.max( + np.abs( + self.ceq(self.interpolation.point(k)) - self.ceq_val[k, :] + ), + initial=error_ceq, + ) + tol = 10.0 * np.sqrt(EPS) * max(self.n, self.npt) + if error_fun > tol * np.max(np.abs(self.fun_val), initial=1.0): + warnings.warn( + "The interpolation conditions for the objective function are " + "not satisfied.", + RuntimeWarning, + 2, + ) + if error_cub > tol * np.max(np.abs(self.cub_val), initial=1.0): + warnings.warn( + "The interpolation conditions for the inequality constraint " + "function are not satisfied.", + RuntimeWarning, + 2, + ) + if error_ceq > tol * np.max(np.abs(self.ceq_val), initial=1.0): + warnings.warn( + "The interpolation conditions for the equality constraint " + "function are not satisfied.", + RuntimeWarning, + 2, + ) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/problem.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/problem.py new file mode 100644 index 0000000000000000000000000000000000000000..d298ee2e27dfa7524e010fe1787b19888f5a496c --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/problem.py @@ -0,0 +1,1287 @@ +from contextlib import suppress +from inspect import signature +import copy + +import numpy as np +from scipy.optimize import ( + Bounds, + LinearConstraint, + NonlinearConstraint, + OptimizeResult, +) +from scipy.optimize._constraints import PreparedConstraint + + +from .settings import PRINT_OPTIONS, BARRIER +from .utils import CallbackSuccess, get_arrays_tol +from .utils import exact_1d_array + + +class ObjectiveFunction: + """ + Real-valued objective function. + """ + + def __init__(self, fun, verbose, debug, *args): + """ + Initialize the objective function. + + Parameters + ---------- + fun : {callable, None} + Function to evaluate, or None. + + ``fun(x, *args) -> float`` + + where ``x`` is an array with shape (n,) and `args` is a tuple. + verbose : bool + Whether to print the function evaluations. + debug : bool + Whether to make debugging tests during the execution. + *args : tuple + Additional arguments to be passed to the function. + """ + if debug: + assert fun is None or callable(fun) + assert isinstance(verbose, bool) + assert isinstance(debug, bool) + + self._fun = fun + self._verbose = verbose + self._args = args + self._n_eval = 0 + + def __call__(self, x): + """ + Evaluate the objective function. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the objective function is evaluated. + + Returns + ------- + float + Function value at `x`. 
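+
+        As a minimal illustrative sketch (assuming a plain callable and no
+        extra arguments), the evaluation simply forwards to the wrapped
+        function:
+
+        >>> obj = ObjectiveFunction(lambda x: x @ x, False, False)
+        >>> obj(np.array([1.0, 2.0]))
+        5.0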
+ """ + x = np.array(x, dtype=float) + if self._fun is None: + f = 0.0 + else: + f = float(np.squeeze(self._fun(x, *self._args))) + self._n_eval += 1 + if self._verbose: + with np.printoptions(**PRINT_OPTIONS): + print(f"{self.name}({x}) = {f}") + return f + + @property + def n_eval(self): + """ + Number of function evaluations. + + Returns + ------- + int + Number of function evaluations. + """ + return self._n_eval + + @property + def name(self): + """ + Name of the objective function. + + Returns + ------- + str + Name of the objective function. + """ + name = "" + if self._fun is not None: + try: + name = self._fun.__name__ + except AttributeError: + name = "fun" + return name + + +class BoundConstraints: + """ + Bound constraints ``xl <= x <= xu``. + """ + + def __init__(self, bounds): + """ + Initialize the bound constraints. + + Parameters + ---------- + bounds : scipy.optimize.Bounds + Bound constraints. + """ + self._xl = np.array(bounds.lb, float) + self._xu = np.array(bounds.ub, float) + + # Remove the ill-defined bounds. + self.xl[np.isnan(self.xl)] = -np.inf + self.xu[np.isnan(self.xu)] = np.inf + + self.is_feasible = ( + np.all(self.xl <= self.xu) + and np.all(self.xl < np.inf) + and np.all(self.xu > -np.inf) + ) + self.m = np.count_nonzero(self.xl > -np.inf) + np.count_nonzero( + self.xu < np.inf + ) + self.pcs = PreparedConstraint(bounds, np.ones(bounds.lb.size)) + + @property + def xl(self): + """ + Lower bound. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Lower bound. + """ + return self._xl + + @property + def xu(self): + """ + Upper bound. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Upper bound. + """ + return self._xu + + def maxcv(self, x): + """ + Evaluate the maximum constraint violation. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the maximum constraint violation is evaluated. + + Returns + ------- + float + Maximum constraint violation at `x`. + """ + x = np.asarray(x, dtype=float) + return self.violation(x) + + def violation(self, x): + # shortcut for no bounds + if self.is_feasible: + return np.array([0]) + else: + return self.pcs.violation(x) + + def project(self, x): + """ + Project a point onto the feasible set. + + Parameters + ---------- + x : array_like, shape (n,) + Point to be projected. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Projection of `x` onto the feasible set. + """ + return np.clip(x, self.xl, self.xu) if self.is_feasible else x + + +class LinearConstraints: + """ + Linear constraints ``a_ub @ x <= b_ub`` and ``a_eq @ x == b_eq``. + """ + + def __init__(self, constraints, n, debug): + """ + Initialize the linear constraints. + + Parameters + ---------- + constraints : list of LinearConstraint + Linear constraints. + n : int + Number of variables. + debug : bool + Whether to make debugging tests during the execution. 
+ """ + if debug: + assert isinstance(constraints, list) + for constraint in constraints: + assert isinstance(constraint, LinearConstraint) + assert isinstance(debug, bool) + + self._a_ub = np.empty((0, n)) + self._b_ub = np.empty(0) + self._a_eq = np.empty((0, n)) + self._b_eq = np.empty(0) + for constraint in constraints: + is_equality = np.abs( + constraint.ub - constraint.lb + ) <= get_arrays_tol(constraint.lb, constraint.ub) + if np.any(is_equality): + self._a_eq = np.vstack((self.a_eq, constraint.A[is_equality])) + self._b_eq = np.concatenate( + ( + self.b_eq, + 0.5 + * ( + constraint.lb[is_equality] + + constraint.ub[is_equality] + ), + ) + ) + if not np.all(is_equality): + self._a_ub = np.vstack( + ( + self.a_ub, + constraint.A[~is_equality], + -constraint.A[~is_equality], + ) + ) + self._b_ub = np.concatenate( + ( + self.b_ub, + constraint.ub[~is_equality], + -constraint.lb[~is_equality], + ) + ) + + # Remove the ill-defined constraints. + self.a_ub[np.isnan(self.a_ub)] = 0.0 + self.a_eq[np.isnan(self.a_eq)] = 0.0 + undef_ub = np.isnan(self.b_ub) | np.isinf(self.b_ub) + undef_eq = np.isnan(self.b_eq) + self._a_ub = self.a_ub[~undef_ub, :] + self._b_ub = self.b_ub[~undef_ub] + self._a_eq = self.a_eq[~undef_eq, :] + self._b_eq = self.b_eq[~undef_eq] + self.pcs = [ + PreparedConstraint(c, np.ones(n)) for c in constraints if c.A.size + ] + + @property + def a_ub(self): + """ + Left-hand side matrix of the linear inequality constraints. + + Returns + ------- + `numpy.ndarray`, shape (m, n) + Left-hand side matrix of the linear inequality constraints. + """ + return self._a_ub + + @property + def b_ub(self): + """ + Right-hand side vector of the linear inequality constraints. + + Returns + ------- + `numpy.ndarray`, shape (m, n) + Right-hand side vector of the linear inequality constraints. + """ + return self._b_ub + + @property + def a_eq(self): + """ + Left-hand side matrix of the linear equality constraints. + + Returns + ------- + `numpy.ndarray`, shape (m, n) + Left-hand side matrix of the linear equality constraints. + """ + return self._a_eq + + @property + def b_eq(self): + """ + Right-hand side vector of the linear equality constraints. + + Returns + ------- + `numpy.ndarray`, shape (m, n) + Right-hand side vector of the linear equality constraints. + """ + return self._b_eq + + @property + def m_ub(self): + """ + Number of linear inequality constraints. + + Returns + ------- + int + Number of linear inequality constraints. + """ + return self.b_ub.size + + @property + def m_eq(self): + """ + Number of linear equality constraints. + + Returns + ------- + int + Number of linear equality constraints. + """ + return self.b_eq.size + + def maxcv(self, x): + """ + Evaluate the maximum constraint violation. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the maximum constraint violation is evaluated. + + Returns + ------- + float + Maximum constraint violation at `x`. + """ + return np.max(self.violation(x), initial=0.0) + + def violation(self, x): + if len(self.pcs): + return np.concatenate([pc.violation(x) for pc in self.pcs]) + return np.array([]) + + +class NonlinearConstraints: + """ + Nonlinear constraints ``c_ub(x) <= 0`` and ``c_eq(x) == b_eq``. + """ + + def __init__(self, constraints, verbose, debug): + """ + Initialize the nonlinear constraints. + + Parameters + ---------- + constraints : list + Nonlinear constraints. + verbose : bool + Whether to print the function evaluations. 
+        debug : bool
+            Whether to make debugging tests during the execution.
+        """
+        if debug:
+            assert isinstance(constraints, list)
+            for constraint in constraints:
+                assert isinstance(constraint, NonlinearConstraint)
+            assert isinstance(verbose, bool)
+            assert isinstance(debug, bool)
+
+        self._constraints = constraints
+        self.pcs = []
+        self._verbose = verbose
+
+        # map of indexes for equality and inequality constraints
+        self._map_ub = None
+        self._map_eq = None
+        self._m_ub = self._m_eq = None
+
+    def __call__(self, x):
+        """
+        Calculate the residuals (slacks) of the constraints.
+
+        Parameters
+        ----------
+        x : array_like, shape (n,)
+            Point at which the constraints are evaluated.
+
+        Returns
+        -------
+        `numpy.ndarray`, shape (m_nonlinear_ub,)
+            Nonlinear inequality constraint slack values.
+        `numpy.ndarray`, shape (m_nonlinear_eq,)
+            Nonlinear equality constraint slack values.
+        """
+        if not len(self._constraints):
+            self._m_eq = self._m_ub = 0
+            return np.array([]), np.array([])
+
+        x = np.array(x, dtype=float)
+        # first time around the constraints haven't been prepared
+        if not len(self.pcs):
+            self._map_ub = []
+            self._map_eq = []
+            self._m_eq = 0
+            self._m_ub = 0
+
+            for constraint in self._constraints:
+                if not callable(constraint.jac):
+                    # having a callable constraint function prevents
+                    # constraint.fun from being evaluated when preparing
+                    # constraint
+                    c = copy.copy(constraint)
+                    c.jac = lambda x0: x0
+                    c.hess = lambda x0, v: 0.0
+                    pc = PreparedConstraint(c, x)
+                else:
+                    pc = PreparedConstraint(constraint, x)
+                # we're going to be using the same x value again immediately
+                # after this initialisation
+                pc.fun.f_updated = True
+
+                self.pcs.append(pc)
+                idx = np.arange(pc.fun.m)
+
+                # figure out equality and inequality maps
+                lb, ub = pc.bounds[0], pc.bounds[1]
+                arr_tol = get_arrays_tol(lb, ub)
+                is_equality = np.abs(ub - lb) <= arr_tol
+                self._map_eq.append(idx[is_equality])
+                self._map_ub.append(idx[~is_equality])
+
+                # these values will be corrected to their proper values later
+                self._m_eq += np.count_nonzero(is_equality)
+                self._m_ub += np.count_nonzero(~is_equality)
+
+        c_ub = []
+        c_eq = []
+        for i, pc in enumerate(self.pcs):
+            val = pc.fun.fun(x)
+            if self._verbose:
+                with np.printoptions(**PRINT_OPTIONS):
+                    with suppress(AttributeError):
+                        fun_name = self._constraints[i].fun.__name__
+                        print(f"{fun_name}({x}) = {val}")
+
+            # separate violations into c_eq and c_ub
+            eq_idx = self._map_eq[i]
+            ub_idx = self._map_ub[i]
+
+            ub_val = val[ub_idx]
+            if len(ub_idx):
+                xl = pc.bounds[0][ub_idx]
+                xu = pc.bounds[1][ub_idx]
+
+                # calculate slack relative to the lower bound
+                finite_xl = xl > -np.inf
+                _v = xl[finite_xl] - ub_val[finite_xl]
+                c_ub.append(_v)
+
+                # calculate slack relative to the upper bound
+                finite_xu = xu < np.inf
+                _v = ub_val[finite_xu] - xu[finite_xu]
+                c_ub.append(_v)
+
+            # equality constraints taken from midpoint between lb and ub
+            eq_val = val[eq_idx]
+            if len(eq_idx):
+                midpoint = 0.5 * (pc.bounds[1][eq_idx] + pc.bounds[0][eq_idx])
+                eq_val -= midpoint
+            c_eq.append(eq_val)
+
+        if self._m_eq:
+            c_eq = np.concatenate(c_eq)
+        else:
+            c_eq = np.array([])
+
+        if self._m_ub:
+            c_ub = np.concatenate(c_ub)
+        else:
+            c_ub = np.array([])
+
+        self._m_ub = c_ub.size
+        self._m_eq = c_eq.size
+
+        return c_ub, c_eq
+
+    @property
+    def m_ub(self):
+        """
+        Number of nonlinear inequality constraints.
+
+        Returns
+        -------
+        int
+            Number of nonlinear inequality constraints.
+
+        Raises
+        ------
+        ValueError
+            If the number of nonlinear inequality constraints is unknown.
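+            The count only becomes available once the constraints have been
+            evaluated, that is, after the first call to `__call__`.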
+ """ + if self._m_ub is None: + raise ValueError( + "The number of nonlinear inequality constraints is unknown." + ) + else: + return self._m_ub + + @property + def m_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + + Raises + ------ + ValueError + If the number of nonlinear equality constraints is unknown. + """ + if self._m_eq is None: + raise ValueError( + "The number of nonlinear equality constraints is unknown." + ) + else: + return self._m_eq + + @property + def n_eval(self): + """ + Number of function evaluations. + + Returns + ------- + int + Number of function evaluations. + """ + if len(self.pcs): + return self.pcs[0].fun.nfev + else: + return 0 + + def maxcv(self, x, cub_val=None, ceq_val=None): + """ + Evaluate the maximum constraint violation. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the maximum constraint violation is evaluated. + cub_val : array_like, shape (m_nonlinear_ub,), optional + Values of the nonlinear inequality constraints. If not provided, + the nonlinear inequality constraints are evaluated at `x`. + ceq_val : array_like, shape (m_nonlinear_eq,), optional + Values of the nonlinear equality constraints. If not provided, + the nonlinear equality constraints are evaluated at `x`. + + Returns + ------- + float + Maximum constraint violation at `x`. + """ + return np.max( + self.violation(x, cub_val=cub_val, ceq_val=ceq_val), initial=0.0 + ) + + def violation(self, x, cub_val=None, ceq_val=None): + return np.concatenate([pc.violation(x) for pc in self.pcs]) + + +class Problem: + """ + Optimization problem. + """ + + def __init__( + self, + obj, + x0, + bounds, + linear, + nonlinear, + callback, + feasibility_tol, + scale, + store_history, + history_size, + filter_size, + debug, + ): + """ + Initialize the nonlinear problem. + + The problem is preprocessed to remove all the variables that are fixed + by the bound constraints. + + Parameters + ---------- + obj : ObjectiveFunction + Objective function. + x0 : array_like, shape (n,) + Initial guess. + bounds : BoundConstraints + Bound constraints. + linear : LinearConstraints + Linear constraints. + nonlinear : NonlinearConstraints + Nonlinear constraints. + callback : {callable, None} + Callback function. + feasibility_tol : float + Tolerance on the constraint violation. + scale : bool + Whether to scale the problem according to the bounds. + store_history : bool + Whether to store the function evaluations. + history_size : int + Maximum number of function evaluations to store. + filter_size : int + Maximum number of points in the filter. + debug : bool + Whether to make debugging tests during the execution. + """ + if debug: + assert isinstance(obj, ObjectiveFunction) + assert isinstance(bounds, BoundConstraints) + assert isinstance(linear, LinearConstraints) + assert isinstance(nonlinear, NonlinearConstraints) + assert isinstance(feasibility_tol, float) + assert isinstance(scale, bool) + assert isinstance(store_history, bool) + assert isinstance(history_size, int) + if store_history: + assert history_size > 0 + assert isinstance(filter_size, int) + assert filter_size > 0 + assert isinstance(debug, bool) + + self._obj = obj + self._linear = linear + self._nonlinear = nonlinear + if callback is not None: + if not callable(callback): + raise TypeError("The callback must be a callable function.") + self._callback = callback + + # Check the consistency of the problem. 
+ x0 = exact_1d_array(x0, "The initial guess must be a vector.") + n = x0.size + if bounds.xl.size != n: + raise ValueError(f"The bounds must have {n} elements.") + if linear.a_ub.shape[1] != n: + raise ValueError( + f"The left-hand side matrices of the linear constraints must " + f"have {n} columns." + ) + + # Check which variables are fixed. + tol = get_arrays_tol(bounds.xl, bounds.xu) + self._fixed_idx = (bounds.xl <= bounds.xu) & ( + np.abs(bounds.xl - bounds.xu) < tol + ) + self._fixed_val = 0.5 * ( + bounds.xl[self._fixed_idx] + bounds.xu[self._fixed_idx] + ) + self._fixed_val = np.clip( + self._fixed_val, + bounds.xl[self._fixed_idx], + bounds.xu[self._fixed_idx], + ) + + # Set the bound constraints. + self._orig_bounds = bounds + self._bounds = BoundConstraints( + Bounds(bounds.xl[~self._fixed_idx], bounds.xu[~self._fixed_idx]) + ) + + # Set the initial guess. + self._x0 = self._bounds.project(x0[~self._fixed_idx]) + + # Set the linear constraints. + b_eq = linear.b_eq - linear.a_eq[:, self._fixed_idx] @ self._fixed_val + self._linear = LinearConstraints( + [ + LinearConstraint( + linear.a_ub[:, ~self._fixed_idx], + -np.inf, + linear.b_ub + - linear.a_ub[:, self._fixed_idx] @ self._fixed_val, + ), + LinearConstraint(linear.a_eq[:, ~self._fixed_idx], b_eq, b_eq), + ], + self.n, + debug, + ) + + # Scale the problem if necessary. + scale = ( + scale + and self._bounds.is_feasible + and np.all(np.isfinite(self._bounds.xl)) + and np.all(np.isfinite(self._bounds.xu)) + ) + if scale: + self._scaling_factor = 0.5 * (self._bounds.xu - self._bounds.xl) + self._scaling_shift = 0.5 * (self._bounds.xu + self._bounds.xl) + self._bounds = BoundConstraints( + Bounds(-np.ones(self.n), np.ones(self.n)) + ) + b_eq = self._linear.b_eq - self._linear.a_eq @ self._scaling_shift + self._linear = LinearConstraints( + [ + LinearConstraint( + self._linear.a_ub @ np.diag(self._scaling_factor), + -np.inf, + self._linear.b_ub + - self._linear.a_ub @ self._scaling_shift, + ), + LinearConstraint( + self._linear.a_eq @ np.diag(self._scaling_factor), + b_eq, + b_eq, + ), + ], + self.n, + debug, + ) + self._x0 = (self._x0 - self._scaling_shift) / self._scaling_factor + else: + self._scaling_factor = np.ones(self.n) + self._scaling_shift = np.zeros(self.n) + + # Set the initial filter. + self._feasibility_tol = feasibility_tol + self._filter_size = filter_size + self._fun_filter = [] + self._maxcv_filter = [] + self._x_filter = [] + + # Set the initial history. + self._store_history = store_history + self._history_size = history_size + self._fun_history = [] + self._maxcv_history = [] + self._x_history = [] + + def __call__(self, x): + """ + Evaluate the objective and nonlinear constraint functions. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the functions are evaluated. + + Returns + ------- + float + Objective function value. + `numpy.ndarray`, shape (m_nonlinear_ub,) + Nonlinear inequality constraint function values. + `numpy.ndarray`, shape (m_nonlinear_eq,) + Nonlinear equality constraint function values. + + Raises + ------ + `cobyqa.utils.CallbackSuccess` + If the callback function raises a ``StopIteration``. + """ + # Evaluate the objective and nonlinear constraint functions. 
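+        # The point `x` received here lives in the reduced (and possibly
+        # scaled) space; `build_x` maps it back to the original space,
+        # reinserting the variables fixed by the bounds, before calling the
+        # user-supplied functions.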
+ x = np.asarray(x, dtype=float) + x_full = self.build_x(x) + fun_val = self._obj(x_full) + cub_val, ceq_val = self._nonlinear(x_full) + maxcv_val = self.maxcv(x, cub_val, ceq_val) + if self._store_history: + self._fun_history.append(fun_val) + self._maxcv_history.append(maxcv_val) + self._x_history.append(x) + if len(self._fun_history) > self._history_size: + self._fun_history.pop(0) + self._maxcv_history.pop(0) + self._x_history.pop(0) + + # Add the point to the filter if it is not dominated by any point. + if np.isnan(fun_val) and np.isnan(maxcv_val): + include_point = len(self._fun_filter) == 0 + elif np.isnan(fun_val): + include_point = all( + np.isnan(fun_filter) + and maxcv_val < maxcv_filter + or np.isnan(maxcv_filter) + for fun_filter, maxcv_filter in zip( + self._fun_filter, + self._maxcv_filter, + ) + ) + elif np.isnan(maxcv_val): + include_point = all( + np.isnan(maxcv_filter) + and fun_val < fun_filter + or np.isnan(fun_filter) + for fun_filter, maxcv_filter in zip( + self._fun_filter, + self._maxcv_filter, + ) + ) + else: + include_point = all( + fun_val < fun_filter or maxcv_val < maxcv_filter + for fun_filter, maxcv_filter in zip( + self._fun_filter, + self._maxcv_filter, + ) + ) + if include_point: + self._fun_filter.append(fun_val) + self._maxcv_filter.append(maxcv_val) + self._x_filter.append(x) + + # Remove the points in the filter that are dominated by the new + # point. We must iterate in reverse order to avoid problems when + # removing elements from the list. + for k in range(len(self._fun_filter) - 2, -1, -1): + if np.isnan(fun_val): + remove_point = np.isnan(self._fun_filter[k]) + elif np.isnan(maxcv_val): + remove_point = np.isnan(self._maxcv_filter[k]) + else: + remove_point = ( + np.isnan(self._fun_filter[k]) + or np.isnan(self._maxcv_filter[k]) + or fun_val <= self._fun_filter[k] + and maxcv_val <= self._maxcv_filter[k] + ) + if remove_point: + self._fun_filter.pop(k) + self._maxcv_filter.pop(k) + self._x_filter.pop(k) + + # Keep only the most recent points in the filter. + if len(self._fun_filter) > self._filter_size: + self._fun_filter.pop(0) + self._maxcv_filter.pop(0) + self._x_filter.pop(0) + + # Evaluate the callback function after updating the filter to ensure + # that the current point can be returned by the method. + if self._callback is not None: + sig = signature(self._callback) + try: + if set(sig.parameters) == {"intermediate_result"}: + intermediate_result = OptimizeResult(x=x_full, fun=fun_val) + self._callback(intermediate_result=intermediate_result) + else: + self._callback(x_full) + except StopIteration as exc: + raise CallbackSuccess from exc + + # Apply the extreme barriers and return. + if np.isnan(fun_val): + fun_val = BARRIER + cub_val[np.isnan(cub_val)] = BARRIER + ceq_val[np.isnan(ceq_val)] = BARRIER + fun_val = max(min(fun_val, BARRIER), -BARRIER) + cub_val = np.maximum(np.minimum(cub_val, BARRIER), -BARRIER) + ceq_val = np.maximum(np.minimum(ceq_val, BARRIER), -BARRIER) + return fun_val, cub_val, ceq_val + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self.x0.size + + @property + def n_orig(self): + """ + Number of variables in the original problem (with fixed variables). + + Returns + ------- + int + Number of variables in the original problem (with fixed variables). + """ + return self._fixed_idx.size + + @property + def x0(self): + """ + Initial guess. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Initial guess. 
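+            The guess is expressed in the reduced space, that is, after the
+            fixed variables have been removed and the remaining variables
+            possibly scaled according to the bounds.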
+ """ + return self._x0 + + @property + def n_eval(self): + """ + Number of function evaluations. + + Returns + ------- + int + Number of function evaluations. + """ + return self._obj.n_eval + + @property + def fun_name(self): + """ + Name of the objective function. + + Returns + ------- + str + Name of the objective function. + """ + return self._obj.name + + @property + def bounds(self): + """ + Bound constraints. + + Returns + ------- + BoundConstraints + Bound constraints. + """ + return self._bounds + + @property + def linear(self): + """ + Linear constraints. + + Returns + ------- + LinearConstraints + Linear constraints. + """ + return self._linear + + @property + def m_bounds(self): + """ + Number of bound constraints. + + Returns + ------- + int + Number of bound constraints. + """ + return self.bounds.m + + @property + def m_linear_ub(self): + """ + Number of linear inequality constraints. + + Returns + ------- + int + Number of linear inequality constraints. + """ + return self.linear.m_ub + + @property + def m_linear_eq(self): + """ + Number of linear equality constraints. + + Returns + ------- + int + Number of linear equality constraints. + """ + return self.linear.m_eq + + @property + def m_nonlinear_ub(self): + """ + Number of nonlinear inequality constraints. + + Returns + ------- + int + Number of nonlinear inequality constraints. + + Raises + ------ + ValueError + If the number of nonlinear inequality constraints is not known. + """ + return self._nonlinear.m_ub + + @property + def m_nonlinear_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + + Raises + ------ + ValueError + If the number of nonlinear equality constraints is not known. + """ + return self._nonlinear.m_eq + + @property + def fun_history(self): + """ + History of objective function evaluations. + + Returns + ------- + `numpy.ndarray`, shape (n_eval,) + History of objective function evaluations. + """ + return np.array(self._fun_history, dtype=float) + + @property + def maxcv_history(self): + """ + History of maximum constraint violations. + + Returns + ------- + `numpy.ndarray`, shape (n_eval,) + History of maximum constraint violations. + """ + return np.array(self._maxcv_history, dtype=float) + + @property + def type(self): + """ + Type of the problem. + + The problem can be either 'unconstrained', 'bound-constrained', + 'linearly constrained', or 'nonlinearly constrained'. + + Returns + ------- + str + Type of the problem. + """ + try: + if self.m_nonlinear_ub > 0 or self.m_nonlinear_eq > 0: + return "nonlinearly constrained" + elif self.m_linear_ub > 0 or self.m_linear_eq > 0: + return "linearly constrained" + elif self.m_bounds > 0: + return "bound-constrained" + else: + return "unconstrained" + except ValueError: + # The number of nonlinear constraints is not known. It may be zero + # if the user provided a nonlinear inequality and/or equality + # constraint function that returns an empty array. However, as this + # is not known before the first call to the function, we assume + # that the problem is nonlinearly constrained. + return "nonlinearly constrained" + + @property + def is_feasibility(self): + """ + Whether the problem is a feasibility problem. + + Returns + ------- + bool + Whether the problem is a feasibility problem. + """ + return self.fun_name == "" + + def build_x(self, x): + """ + Build the full vector of variables from the reduced vector. 
+
+        Parameters
+        ----------
+        x : array_like, shape (n,)
+            Reduced vector of variables.
+
+        Returns
+        -------
+        `numpy.ndarray`, shape (n_orig,)
+            Full vector of variables.
+        """
+        x_full = np.empty(self.n_orig)
+        x_full[self._fixed_idx] = self._fixed_val
+        x_full[~self._fixed_idx] = (x * self._scaling_factor
+                                    + self._scaling_shift)
+        return self._orig_bounds.project(x_full)
+
+    def maxcv(self, x, cub_val=None, ceq_val=None):
+        """
+        Evaluate the maximum constraint violation.
+
+        Parameters
+        ----------
+        x : array_like, shape (n,)
+            Point at which the maximum constraint violation is evaluated.
+        cub_val : array_like, shape (m_nonlinear_ub,), optional
+            Values of the nonlinear inequality constraints. If not provided,
+            the nonlinear inequality constraints are evaluated at `x`.
+        ceq_val : array_like, shape (m_nonlinear_eq,), optional
+            Values of the nonlinear equality constraints. If not provided,
+            the nonlinear equality constraints are evaluated at `x`.
+
+        Returns
+        -------
+        float
+            Maximum constraint violation at `x`.
+        """
+        violation = self.violation(x, cub_val=cub_val, ceq_val=ceq_val)
+        if np.count_nonzero(violation):
+            return np.max(violation, initial=0.0)
+        else:
+            return 0.0
+
+    def violation(self, x, cub_val=None, ceq_val=None):
+        """
+        Evaluate the constraint violations.
+
+        Parameters
+        ----------
+        x : array_like, shape (n,)
+            Point at which the constraint violations are evaluated.
+        cub_val : array_like, shape (m_nonlinear_ub,), optional
+            Values of the nonlinear inequality constraints. If not provided,
+            the nonlinear inequality constraints are evaluated at `x`.
+        ceq_val : array_like, shape (m_nonlinear_eq,), optional
+            Values of the nonlinear equality constraints. If not provided,
+            the nonlinear equality constraints are evaluated at `x`.
+
+        Returns
+        -------
+        `numpy.ndarray`
+            Violations of the bound, linear, and nonlinear constraints at
+            `x`, concatenated into a single vector (empty if the problem has
+            no constraints).
+        """
+        violation = []
+        if not self.bounds.is_feasible:
+            violation.append(self.bounds.violation(x))
+        if len(self.linear.pcs):
+            violation.append(self.linear.violation(x))
+        if len(self._nonlinear.pcs):
+            violation.append(self._nonlinear.violation(x, cub_val, ceq_val))
+        if len(violation):
+            return np.concatenate(violation)
+        return np.empty(0)
+
+    def best_eval(self, penalty):
+        """
+        Return the best point in the filter and the corresponding objective
+        function value and maximum constraint violation.
+
+        Parameters
+        ----------
+        penalty : float
+            Penalty parameter.
+
+        Returns
+        -------
+        `numpy.ndarray`, shape (n,)
+            Best point.
+        float
+            Corresponding objective function value.
+        float
+            Corresponding maximum constraint violation.
+        """
+        # If the filter is empty, i.e., if no function evaluation has been
+        # performed, we evaluate the objective and nonlinear constraint
+        # functions at the initial guess.
+        if len(self._fun_filter) == 0:
+            self(self.x0)
+
+        # Find the best point in the filter.
+        fun_filter = np.array(self._fun_filter)
+        maxcv_filter = np.array(self._maxcv_filter)
+        x_filter = np.array(self._x_filter)
+        finite_idx = np.isfinite(maxcv_filter)
+        if np.any(finite_idx):
+            # At least one point has a finite maximum constraint violation.
+            feasible_idx = maxcv_filter <= self._feasibility_tol
+            if np.any(feasible_idx) and not np.all(
+                np.isnan(fun_filter[feasible_idx])
+            ):
+                # At least one point is feasible and has a well-defined
+                # objective function value. We select the point with the least
+                # objective function value. If there is a tie, we select the
+                # point with the least maximum constraint violation. If there
+                # is still a tie, we select the most recent point.
+                fun_min_idx = feasible_idx & (
+                    fun_filter <= np.nanmin(fun_filter[feasible_idx])
+                )
+                if np.count_nonzero(fun_min_idx) > 1:
+                    fun_min_idx &= maxcv_filter <= np.min(
+                        maxcv_filter[fun_min_idx]
+                    )
+                i = np.flatnonzero(fun_min_idx)[-1]
+            elif np.any(feasible_idx):
+                # At least one point is feasible but no feasible point has a
+                # well-defined objective function value. We select the most
+                # recent feasible point.
+                i = np.flatnonzero(feasible_idx)[-1]
+            else:
+                # No point is feasible. We first compute the merit function
+                # value for each point.
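+                # The merit function of a point is fun + penalty * maxcv; it
+                # is well defined only for points whose maximum constraint
+                # violation is finite.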
+ merit_filter = np.full_like(fun_filter, np.nan) + merit_filter[finite_idx] = ( + fun_filter[finite_idx] + penalty * maxcv_filter[finite_idx] + ) + if np.all(np.isnan(merit_filter)): + # No point has a well-defined merit function value. In + # other words, among the points with a well-defined maximum + # constraint violation, none has a well-defined objective + # function value. We select the point with the least + # maximum constraint violation. If there is a tie, we + # select the most recent point. + min_maxcv_idx = maxcv_filter <= np.nanmin(maxcv_filter) + i = np.flatnonzero(min_maxcv_idx)[-1] + else: + # At least one point has a well-defined merit function + # value. We select the point with the least merit function + # value. If there is a tie, we select the point with the + # least maximum constraint violation. If there is still a + # tie, we select the point with the least objective + # function value. If there is still a tie, we select the + # most recent point. + merit_min_idx = merit_filter <= np.nanmin(merit_filter) + if np.count_nonzero(merit_min_idx) > 1: + merit_min_idx &= maxcv_filter <= np.min( + maxcv_filter[merit_min_idx] + ) + + if np.count_nonzero(merit_min_idx) > 1: + merit_min_idx &= fun_filter <= np.min( + fun_filter[merit_min_idx] + ) + i = np.flatnonzero(merit_min_idx)[-1] + elif not np.all(np.isnan(fun_filter)): + # No maximum constraint violation is well-defined but at least one + # point has a well-defined objective function value. We select the + # point with the least objective function value. If there is a tie, + # we select the most recent point. + fun_min_idx = fun_filter <= np.nanmin(fun_filter) + i = np.flatnonzero(fun_min_idx)[-1] + else: + # No point has a well-defined maximum constraint violation or + # objective function value. We select the most recent point. + i = len(fun_filter) - 1 + return ( + self.bounds.project(x_filter[i, :]), + fun_filter[i], + maxcv_filter[i], + ) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..6394822826e094a803a485556a298e342bf260ac --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py @@ -0,0 +1,132 @@ +import sys +from enum import Enum + +import numpy as np + + +# Exit status. +class ExitStatus(Enum): + """ + Exit statuses. + """ + + RADIUS_SUCCESS = 0 + TARGET_SUCCESS = 1 + FIXED_SUCCESS = 2 + CALLBACK_SUCCESS = 3 + FEASIBLE_SUCCESS = 4 + MAX_EVAL_WARNING = 5 + MAX_ITER_WARNING = 6 + INFEASIBLE_ERROR = -1 + LINALG_ERROR = -2 + + +class Options(str, Enum): + """ + Options. + """ + + DEBUG = "debug" + FEASIBILITY_TOL = "feasibility_tol" + FILTER_SIZE = "filter_size" + HISTORY_SIZE = "history_size" + MAX_EVAL = "maxfev" + MAX_ITER = "maxiter" + NPT = "nb_points" + RHOBEG = "radius_init" + RHOEND = "radius_final" + SCALE = "scale" + STORE_HISTORY = "store_history" + TARGET = "target" + VERBOSE = "disp" + + +class Constants(str, Enum): + """ + Constants. 
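+
+    Keys identifying the algorithmic constants listed in
+    ``DEFAULT_CONSTANTS``.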
+ """ + + DECREASE_RADIUS_FACTOR = "decrease_radius_factor" + INCREASE_RADIUS_FACTOR = "increase_radius_factor" + INCREASE_RADIUS_THRESHOLD = "increase_radius_threshold" + DECREASE_RADIUS_THRESHOLD = "decrease_radius_threshold" + DECREASE_RESOLUTION_FACTOR = "decrease_resolution_factor" + LARGE_RESOLUTION_THRESHOLD = "large_resolution_threshold" + MODERATE_RESOLUTION_THRESHOLD = "moderate_resolution_threshold" + LOW_RATIO = "low_ratio" + HIGH_RATIO = "high_ratio" + VERY_LOW_RATIO = "very_low_ratio" + PENALTY_INCREASE_THRESHOLD = "penalty_increase_threshold" + PENALTY_INCREASE_FACTOR = "penalty_increase_factor" + SHORT_STEP_THRESHOLD = "short_step_threshold" + LOW_RADIUS_FACTOR = "low_radius_factor" + BYRD_OMOJOKUN_FACTOR = "byrd_omojokun_factor" + THRESHOLD_RATIO_CONSTRAINTS = "threshold_ratio_constraints" + LARGE_SHIFT_FACTOR = "large_shift_factor" + LARGE_GRADIENT_FACTOR = "large_gradient_factor" + RESOLUTION_FACTOR = "resolution_factor" + IMPROVE_TCG = "improve_tcg" + + +# Default options. +DEFAULT_OPTIONS = { + Options.DEBUG.value: False, + Options.FEASIBILITY_TOL.value: np.sqrt(np.finfo(float).eps), + Options.FILTER_SIZE.value: sys.maxsize, + Options.HISTORY_SIZE.value: sys.maxsize, + Options.MAX_EVAL.value: lambda n: 500 * n, + Options.MAX_ITER.value: lambda n: 1000 * n, + Options.NPT.value: lambda n: 2 * n + 1, + Options.RHOBEG.value: 1.0, + Options.RHOEND.value: 1e-6, + Options.SCALE.value: False, + Options.STORE_HISTORY.value: False, + Options.TARGET.value: -np.inf, + Options.VERBOSE.value: False, +} + +# Default constants. +DEFAULT_CONSTANTS = { + Constants.DECREASE_RADIUS_FACTOR.value: 0.5, + Constants.INCREASE_RADIUS_FACTOR.value: np.sqrt(2.0), + Constants.INCREASE_RADIUS_THRESHOLD.value: 2.0, + Constants.DECREASE_RADIUS_THRESHOLD.value: 1.4, + Constants.DECREASE_RESOLUTION_FACTOR.value: 0.1, + Constants.LARGE_RESOLUTION_THRESHOLD.value: 250.0, + Constants.MODERATE_RESOLUTION_THRESHOLD.value: 16.0, + Constants.LOW_RATIO.value: 0.1, + Constants.HIGH_RATIO.value: 0.7, + Constants.VERY_LOW_RATIO.value: 0.01, + Constants.PENALTY_INCREASE_THRESHOLD.value: 1.5, + Constants.PENALTY_INCREASE_FACTOR.value: 2.0, + Constants.SHORT_STEP_THRESHOLD.value: 0.5, + Constants.LOW_RADIUS_FACTOR.value: 0.1, + Constants.BYRD_OMOJOKUN_FACTOR.value: 0.8, + Constants.THRESHOLD_RATIO_CONSTRAINTS.value: 2.0, + Constants.LARGE_SHIFT_FACTOR.value: 10.0, + Constants.LARGE_GRADIENT_FACTOR.value: 10.0, + Constants.RESOLUTION_FACTOR.value: 2.0, + Constants.IMPROVE_TCG.value: True, +} + +# Printing options. +PRINT_OPTIONS = { + "threshold": 6, + "edgeitems": 2, + "linewidth": sys.maxsize, + "formatter": { + "float_kind": lambda x: np.format_float_scientific( + x, + precision=3, + unique=False, + pad_left=2, + ) + }, +} + +# Constants. 
+BARRIER = 2.0 ** min( + 100, + np.finfo(float).maxexp // 2, + -np.finfo(float).minexp // 2, +) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..01a1ad3c6f4cb5c0c9b99d1ce35fea92e7618ff5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py @@ -0,0 +1,14 @@ +from .geometry import cauchy_geometry, spider_geometry +from .optim import ( + tangential_byrd_omojokun, + constrained_tangential_byrd_omojokun, + normal_byrd_omojokun, +) + +__all__ = [ + "cauchy_geometry", + "spider_geometry", + "tangential_byrd_omojokun", + "constrained_tangential_byrd_omojokun", + "normal_byrd_omojokun", +] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9972791224fcd2d60408fd5dca6bfe09505681b6 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e919eed1f302eabe9636505f739194cb569a55ed Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..472196d7f57b5569a30be76bde4ce0ba6fe7d066 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..7b67fd7c813ee493b18720d1daf71324d72330b6 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py @@ -0,0 +1,387 @@ +import inspect + +import numpy as np + +from ..utils import get_arrays_tol + + +TINY = np.finfo(float).tiny + + +def cauchy_geometry(const, grad, curv, xl, xu, delta, debug): + r""" + Maximize approximately the absolute value of a quadratic function subject + to bound constraints in a trust region. 
+ + This function solves approximately + + .. math:: + + \max_{s \in \mathbb{R}^n} \quad \bigg\lvert c + g^{\mathsf{T}} s + + \frac{1}{2} s^{\mathsf{T}} H s \bigg\rvert \quad \text{s.t.} \quad + \left\{ \begin{array}{l} + l \le s \le u,\\ + \lVert s \rVert \le \Delta, + \end{array} \right. + + by maximizing the objective function along the constrained Cauchy + direction. + + Parameters + ---------- + const : float + Constant :math:`c` as shown above. + grad : `numpy.ndarray`, shape (n,) + Gradient :math:`g` as shown above. + curv : callable + Curvature of :math:`H` along any vector. + + ``curv(s) -> float`` + + returns :math:`s^{\mathsf{T}} H s`. + xl : `numpy.ndarray`, shape (n,) + Lower bounds :math:`l` as shown above. + xu : `numpy.ndarray`, shape (n,) + Upper bounds :math:`u` as shown above. + delta : float + Trust-region radius :math:`\Delta` as shown above. + debug : bool + Whether to make debugging tests during the execution. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Notes + ----- + This function is described as the first alternative in Section 6.5 of [1]_. + It is assumed that the origin is feasible with respect to the bound + constraints and that `delta` is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if debug: + assert isinstance(const, float) + assert isinstance(grad, np.ndarray) and grad.ndim == 1 + assert inspect.signature(curv).bind(grad) + assert isinstance(xl, np.ndarray) and xl.shape == grad.shape + assert isinstance(xu, np.ndarray) and xu.shape == grad.shape + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # To maximize the absolute value of a quadratic function, we maximize the + # function itself or its negative, and we choose the solution that provides + # the largest function value. + step1, q_val1 = _cauchy_geom(const, grad, curv, xl, xu, delta, debug) + step2, q_val2 = _cauchy_geom( + -const, + -grad, + lambda x: -curv(x), + xl, + xu, + delta, + debug, + ) + step = step1 if abs(q_val1) >= abs(q_val2) else step2 + + if debug: + assert np.all(xl <= step) + assert np.all(step <= xu) + assert np.linalg.norm(step) < 1.1 * delta + return step + + +def spider_geometry(const, grad, curv, xpt, xl, xu, delta, debug): + r""" + Maximize approximately the absolute value of a quadratic function subject + to bound constraints in a trust region. + + This function solves approximately + + .. math:: + + \max_{s \in \mathbb{R}^n} \quad \bigg\lvert c + g^{\mathsf{T}} s + + \frac{1}{2} s^{\mathsf{T}} H s \bigg\rvert \quad \text{s.t.} \quad + \left\{ \begin{array}{l} + l \le s \le u,\\ + \lVert s \rVert \le \Delta, + \end{array} \right. + + by maximizing the objective function along given straight lines. + + Parameters + ---------- + const : float + Constant :math:`c` as shown above. + grad : `numpy.ndarray`, shape (n,) + Gradient :math:`g` as shown above. + curv : callable + Curvature of :math:`H` along any vector. + + ``curv(s) -> float`` + + returns :math:`s^{\mathsf{T}} H s`. + xpt : `numpy.ndarray`, shape (n, npt) + Points defining the straight lines. 
The straight lines considered are + the ones passing through the origin and the points in `xpt`. + xl : `numpy.ndarray`, shape (n,) + Lower bounds :math:`l` as shown above. + xu : `numpy.ndarray`, shape (n,) + Upper bounds :math:`u` as shown above. + delta : float + Trust-region radius :math:`\Delta` as shown above. + debug : bool + Whether to make debugging tests during the execution. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Notes + ----- + This function is described as the second alternative in Section 6.5 of + [1]_. It is assumed that the origin is feasible with respect to the bound + constraints and that `delta` is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if debug: + assert isinstance(const, float) + assert isinstance(grad, np.ndarray) and grad.ndim == 1 + assert inspect.signature(curv).bind(grad) + assert ( + isinstance(xpt, np.ndarray) + and xpt.ndim == 2 + and xpt.shape[0] == grad.size + ) + assert isinstance(xl, np.ndarray) and xl.shape == grad.shape + assert isinstance(xu, np.ndarray) and xu.shape == grad.shape + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # Iterate through the straight lines. + step = np.zeros_like(grad) + q_val = const + s_norm = np.linalg.norm(xpt, axis=0) + + # Set alpha_xl to the step size for the lower-bound constraint and + # alpha_xu to the step size for the upper-bound constraint. + + # xl.shape = (N,) + # xpt.shape = (N, M) + # i_xl_pos.shape = (M, N) + i_xl_pos = (xl > -np.inf) & (xpt.T > -TINY * xl) + i_xl_neg = (xl > -np.inf) & (xpt.T < TINY * xl) + i_xu_pos = (xu < np.inf) & (xpt.T > TINY * xu) + i_xu_neg = (xu < np.inf) & (xpt.T < -TINY * xu) + + # (M, N) + alpha_xl_pos = np.atleast_2d( + np.broadcast_to(xl, i_xl_pos.shape)[i_xl_pos] / xpt.T[i_xl_pos] + ) + # (M,) + alpha_xl_pos = np.max(alpha_xl_pos, axis=1, initial=-np.inf) + # make sure it's (M,) + alpha_xl_pos = np.broadcast_to(np.atleast_1d(alpha_xl_pos), xpt.shape[1]) + + alpha_xl_neg = np.atleast_2d( + np.broadcast_to(xl, i_xl_neg.shape)[i_xl_neg] / xpt.T[i_xl_neg] + ) + alpha_xl_neg = np.max(alpha_xl_neg, axis=1, initial=np.inf) + alpha_xl_neg = np.broadcast_to(np.atleast_1d(alpha_xl_neg), xpt.shape[1]) + + alpha_xu_neg = np.atleast_2d( + np.broadcast_to(xu, i_xu_neg.shape)[i_xu_neg] / xpt.T[i_xu_neg] + ) + alpha_xu_neg = np.max(alpha_xu_neg, axis=1, initial=-np.inf) + alpha_xu_neg = np.broadcast_to(np.atleast_1d(alpha_xu_neg), xpt.shape[1]) + + alpha_xu_pos = np.atleast_2d( + np.broadcast_to(xu, i_xu_pos.shape)[i_xu_pos] / xpt.T[i_xu_pos] + ) + alpha_xu_pos = np.max(alpha_xu_pos, axis=1, initial=np.inf) + alpha_xu_pos = np.broadcast_to(np.atleast_1d(alpha_xu_pos), xpt.shape[1]) + + for k in range(xpt.shape[1]): + # Set alpha_tr to the step size for the trust-region constraint. + if s_norm[k] > TINY * delta: + alpha_tr = max(delta / s_norm[k], 0.0) + else: + # The current straight line is basically zero. 
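+            # Skip it, as no meaningful step can be taken along it.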
+            continue
+
+        alpha_bd_pos = max(min(alpha_xu_pos[k], alpha_xl_neg[k]), 0.0)
+        alpha_bd_neg = min(max(alpha_xl_pos[k], alpha_xu_neg[k]), 0.0)
+
+        # Set alpha_quad_pos and alpha_quad_neg to the step sizes to the
+        # extrema of the quadratic function along the positive and negative
+        # directions.
+        grad_step = grad @ xpt[:, k]
+        curv_step = curv(xpt[:, k])
+        if (
+            grad_step >= 0.0
+            and curv_step < -TINY * grad_step
+            or grad_step <= 0.0
+            and curv_step > -TINY * grad_step
+        ):
+            alpha_quad_pos = max(-grad_step / curv_step, 0.0)
+        else:
+            alpha_quad_pos = np.inf
+        if (
+            grad_step >= 0.0
+            and curv_step > TINY * grad_step
+            or grad_step <= 0.0
+            and curv_step < TINY * grad_step
+        ):
+            alpha_quad_neg = min(-grad_step / curv_step, 0.0)
+        else:
+            alpha_quad_neg = -np.inf
+
+        # Select the step that provides the largest value of the objective
+        # function if it improves the current best. The best positive step is
+        # either the one that reaches the constraints or the one that reaches
+        # the extremum of the objective function along the current direction
+        # (only possible if the resulting step is feasible). We test both, and
+        # we perform similar calculations along the negative step.
+        # N.B.: we select the largest possible step among all the ones that
+        # maximize the objective function. This is to avoid returning the zero
+        # step in some extreme cases.
+        alpha_pos = min(alpha_tr, alpha_bd_pos)
+        alpha_neg = max(-alpha_tr, alpha_bd_neg)
+        q_val_pos = (
+            const + alpha_pos * grad_step + 0.5 * alpha_pos**2.0 * curv_step
+        )
+        q_val_neg = (
+            const + alpha_neg * grad_step + 0.5 * alpha_neg**2.0 * curv_step
+        )
+        if alpha_quad_pos < alpha_pos:
+            q_val_quad_pos = (
+                const
+                + alpha_quad_pos * grad_step
+                + 0.5 * alpha_quad_pos**2.0 * curv_step
+            )
+            if abs(q_val_quad_pos) > abs(q_val_pos):
+                alpha_pos = alpha_quad_pos
+                q_val_pos = q_val_quad_pos
+        if alpha_quad_neg > alpha_neg:
+            q_val_quad_neg = (
+                const
+                + alpha_quad_neg * grad_step
+                + 0.5 * alpha_quad_neg**2.0 * curv_step
+            )
+            if abs(q_val_quad_neg) > abs(q_val_neg):
+                alpha_neg = alpha_quad_neg
+                q_val_neg = q_val_quad_neg
+        if abs(q_val_pos) >= abs(q_val_neg) and abs(q_val_pos) > abs(q_val):
+            step = np.clip(alpha_pos * xpt[:, k], xl, xu)
+            q_val = q_val_pos
+        elif abs(q_val_neg) > abs(q_val_pos) and abs(q_val_neg) > abs(q_val):
+            step = np.clip(alpha_neg * xpt[:, k], xl, xu)
+            q_val = q_val_neg
+
+    if debug:
+        assert np.all(xl <= step)
+        assert np.all(step <= xu)
+        assert np.linalg.norm(step) < 1.1 * delta
+    return step
+
+
+def _cauchy_geom(const, grad, curv, xl, xu, delta, debug):
+    """
+    Same as `cauchy_geometry` without the absolute value.
+    """
+    # Calculate the initial active set.
+    fixed_xl = (xl < 0.0) & (grad > 0.0)
+    fixed_xu = (xu > 0.0) & (grad < 0.0)
+
+    # Calculate the Cauchy step.
+    cauchy_step = np.zeros_like(grad)
+    cauchy_step[fixed_xl] = xl[fixed_xl]
+    cauchy_step[fixed_xu] = xu[fixed_xu]
+    if np.linalg.norm(cauchy_step) > delta:
+        working = fixed_xl | fixed_xu
+        while True:
+            # Calculate the Cauchy step for the directions in the working set.
+            g_norm = np.linalg.norm(grad[working])
+            delta_reduced = np.sqrt(
+                delta**2.0 - cauchy_step[~working] @ cauchy_step[~working]
+            )
+            if g_norm > TINY * abs(delta_reduced):
+                mu = max(delta_reduced / g_norm, 0.0)
+            else:
+                break
+            cauchy_step[working] = mu * grad[working]
+
+            # Update the working set.
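+            # Variables pushed beyond their bounds by the tentative Cauchy
+            # step are fixed at those bounds and removed from the working set
+            # before the step is recomputed.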
+            fixed_xl = working & (cauchy_step < xl)
+            fixed_xu = working & (cauchy_step > xu)
+            if not np.any(fixed_xl) and not np.any(fixed_xu):
+                # Stop the calculations as the Cauchy step is now feasible.
+                break
+            cauchy_step[fixed_xl] = xl[fixed_xl]
+            cauchy_step[fixed_xu] = xu[fixed_xu]
+            working = working & ~(fixed_xl | fixed_xu)
+
+    # Calculate the step that maximizes the quadratic along the Cauchy step.
+    grad_step = grad @ cauchy_step
+    if grad_step >= 0.0:
+        # Set alpha_tr to the step size for the trust-region constraint.
+        s_norm = np.linalg.norm(cauchy_step)
+        if s_norm > TINY * delta:
+            alpha_tr = max(delta / s_norm, 0.0)
+        else:
+            # The Cauchy step is basically zero.
+            alpha_tr = 0.0
+
+        # Set alpha_quad to the step size for the maximization problem.
+        curv_step = curv(cauchy_step)
+        if curv_step < -TINY * grad_step:
+            alpha_quad = max(-grad_step / curv_step, 0.0)
+        else:
+            alpha_quad = np.inf
+
+        # Set alpha_bd to the step size for the bound constraints.
+        i_xl = (xl > -np.inf) & (cauchy_step < TINY * xl)
+        i_xu = (xu < np.inf) & (cauchy_step > TINY * xu)
+        alpha_xl = np.min(xl[i_xl] / cauchy_step[i_xl], initial=np.inf)
+        alpha_xu = np.min(xu[i_xu] / cauchy_step[i_xu], initial=np.inf)
+        alpha_bd = min(alpha_xl, alpha_xu)
+
+        # Calculate the solution and the corresponding function value.
+        alpha = min(alpha_tr, alpha_quad, alpha_bd)
+        step = np.clip(alpha * cauchy_step, xl, xu)
+        q_val = const + alpha * grad_step + 0.5 * alpha**2.0 * curv_step
+    else:
+        # This case is never reached in exact arithmetic. It prevents this
+        # function from returning a step that decreases the objective
+        # function.
+        step = np.zeros_like(grad)
+        q_val = const
+
+    if debug:
+        assert np.all(xl <= step)
+        assert np.all(step <= xu)
+        assert np.linalg.norm(step) < 1.1 * delta
+    return step, q_val
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/optim.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/optim.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4a960396fb2e992cf76bac0baf171b5af9b7717
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/optim.py
@@ -0,0 +1,1203 @@
+import inspect
+
+import numpy as np
+from scipy.linalg import qr
+
+from ..utils import get_arrays_tol
+
+
+TINY = np.finfo(float).tiny
+EPS = np.finfo(float).eps
+
+
+def tangential_byrd_omojokun(grad, hess_prod, xl, xu, delta, debug, **kwargs):
+    r"""
+    Minimize approximately a quadratic function subject to bound constraints
+    in a trust region.
+
+    This function solves approximately
+
+    .. math::
+
+        \min_{s \in \mathbb{R}^n} \quad g^{\mathsf{T}} s + \frac{1}{2}
+        s^{\mathsf{T}} H s \quad \text{s.t.} \quad
+        \left\{ \begin{array}{l}
+            l \le s \le u,\\
+            \lVert s \rVert \le \Delta,
+        \end{array} \right.
+
+    using an active-set variation of the truncated conjugate gradient method.
+
+    Parameters
+    ----------
+    grad : `numpy.ndarray`, shape (n,)
+        Gradient :math:`g` as shown above.
+    hess_prod : callable
+        Product of the Hessian matrix :math:`H` with any vector.
+
+            ``hess_prod(s) -> `numpy.ndarray`, shape (n,)``
+
+        returns the product :math:`H s`.
+    xl : `numpy.ndarray`, shape (n,)
+        Lower bounds :math:`l` as shown above.
+    xu : `numpy.ndarray`, shape (n,)
+        Upper bounds :math:`u` as shown above.
+    delta : float
+        Trust-region radius :math:`\Delta` as shown above.
+ debug : bool + Whether to make debugging tests during the execution. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Other Parameters + ---------------- + improve_tcg : bool, optional + If True, a solution generated by the truncated conjugate gradient + method that is on the boundary of the trust region is improved by + moving around the trust-region boundary on the two-dimensional space + spanned by the solution and the gradient of the quadratic function at + the solution (default is True). + + Notes + ----- + This function implements Algorithm 6.2 of [1]_. It is assumed that the + origin is feasible with respect to the bound constraints and that `delta` + is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if debug: + assert isinstance(grad, np.ndarray) and grad.ndim == 1 + assert inspect.signature(hess_prod).bind(grad) + assert isinstance(xl, np.ndarray) and xl.shape == grad.shape + assert isinstance(xu, np.ndarray) and xu.shape == grad.shape + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # Copy the arrays that may be modified by the code below. + n = grad.size + grad = np.copy(grad) + grad_orig = np.copy(grad) + + # Calculate the initial active set. + free_bd = ((xl < 0.0) | (grad < 0.0)) & ((xu > 0.0) | (grad > 0.0)) + + # Set the initial iterate and the initial search direction. + step = np.zeros_like(grad) + sd = np.zeros_like(step) + sd[free_bd] = -grad[free_bd] + + k = 0 + reduct = 0.0 + boundary_reached = False + while k < np.count_nonzero(free_bd): + # Stop the computations if sd is not a descent direction. + grad_sd = grad @ sd + if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)): + break + + # Set alpha_tr to the step size for the trust-region constraint. + try: + alpha_tr = _alpha_tr(step, sd, delta) + except ZeroDivisionError: + break + + # Stop the computations if a step along sd is expected to give a + # relatively small reduction in the objective function. + if -alpha_tr * grad_sd <= 1e-8 * reduct: + break + + # Set alpha_quad to the step size for the minimization problem. + hess_sd = hess_prod(sd) + curv_sd = sd @ hess_sd + if curv_sd > TINY * abs(grad_sd): + alpha_quad = max(-grad_sd / curv_sd, 0.0) + else: + alpha_quad = np.inf + + # Stop the computations if the reduction in the objective function + # provided by an unconstrained step is small. + alpha = min(alpha_tr, alpha_quad) + if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct: + break + + # Set alpha_bd to the step size for the bound constraints. + i_xl = (xl > -np.inf) & (sd < -TINY * np.abs(xl - step)) + i_xu = (xu < np.inf) & (sd > TINY * np.abs(xu - step)) + all_alpha_xl = np.full_like(step, np.inf) + all_alpha_xu = np.full_like(step, np.inf) + all_alpha_xl[i_xl] = np.maximum( + (xl[i_xl] - step[i_xl]) / sd[i_xl], + 0.0, + ) + all_alpha_xu[i_xu] = np.maximum( + (xu[i_xu] - step[i_xu]) / sd[i_xu], + 0.0, + ) + alpha_xl = np.min(all_alpha_xl) + alpha_xu = np.min(all_alpha_xu) + alpha_bd = min(alpha_xl, alpha_xu) + + # Update the iterate. 
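+        # The accepted step size is the most restrictive of the trust-region,
+        # curvature, and bound step sizes, and the iterate is clipped to the
+        # bounds to guard against rounding errors.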
+ alpha = min(alpha, alpha_bd) + if alpha > 0.0: + step[free_bd] = np.clip( + step[free_bd] + alpha * sd[free_bd], + xl[free_bd], + xu[free_bd], + ) + grad += alpha * hess_sd + reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd) + + if alpha < min(alpha_tr, alpha_bd): + # The current iteration is a conjugate gradient iteration. Update + # the search direction so that it is conjugate (with respect to H) + # to all the previous search directions. + beta = (grad[free_bd] @ hess_sd[free_bd]) / curv_sd + sd[free_bd] = beta * sd[free_bd] - grad[free_bd] + sd[~free_bd] = 0.0 + k += 1 + elif alpha < alpha_tr: + # The iterate is restricted by a bound constraint. Add this bound + # constraint to the active set, and restart the calculations. + if alpha_xl <= alpha: + i_new = np.argmin(all_alpha_xl) + step[i_new] = xl[i_new] + else: + i_new = np.argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_bd[i_new] = False + sd[free_bd] = -grad[free_bd] + sd[~free_bd] = 0.0 + k = 0 + else: + # The current iterate is on the trust-region boundary. Add all the + # active bounds to the working set to prepare for the improvement + # of the solution, and stop the iterations. + if alpha_xl <= alpha: + i_new = _argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_bd[i_new] = False + if alpha_xu <= alpha: + i_new = _argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_bd[i_new] = False + boundary_reached = True + break + + # Attempt to improve the solution on the trust-region boundary. + if kwargs.get("improve_tcg", True) and boundary_reached: + step_base = np.copy(step) + step_comparator = grad_orig @ step_base + 0.5 * step_base @ hess_prod( + step_base + ) + + while np.count_nonzero(free_bd) > 0: + # Check whether a substantial reduction in the objective function + # is possible, and set the search direction. + step_sq = step[free_bd] @ step[free_bd] + grad_sq = grad[free_bd] @ grad[free_bd] + grad_step = grad[free_bd] @ step[free_bd] + grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0)) + sd[free_bd] = grad_step * step[free_bd] - step_sq * grad[free_bd] + sd[~free_bd] = 0.0 + if grad_sd >= -1e-8 * reduct or np.any( + grad_sd >= -TINY * np.abs(sd[free_bd]) + ): + break + sd[free_bd] /= -grad_sd + + # Calculate an upper bound for the tangent of half the angle theta + # of this alternative iteration. The step will be updated as: + # step = cos(theta) * step + sin(theta) * sd. + temp_xl = np.zeros(n) + temp_xu = np.zeros(n) + temp_xl[free_bd] = ( + step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xl[free_bd] ** 2.0 + ) + temp_xu[free_bd] = ( + step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xu[free_bd] ** 2.0 + ) + temp_xl[temp_xl > 0.0] = ( + np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0] + ) + temp_xu[temp_xu > 0.0] = ( + np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0] + ) + dist_xl = np.maximum(step - xl, 0.0) + dist_xu = np.maximum(xu - step, 0.0) + i_xl = temp_xl > TINY * dist_xl + i_xu = temp_xu > TINY * dist_xu + all_t_xl = np.ones(n) + all_t_xu = np.ones(n) + all_t_xl[i_xl] = np.minimum( + all_t_xl[i_xl], + dist_xl[i_xl] / temp_xl[i_xl], + ) + all_t_xu[i_xu] = np.minimum( + all_t_xu[i_xu], + dist_xu[i_xu] / temp_xu[i_xu], + ) + t_xl = np.min(all_t_xl) + t_xu = np.min(all_t_xu) + t_bd = min(t_xl, t_xu) + + # Calculate some curvature information. 
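+            # Only three scalar curvature terms are needed to evaluate the
+            # quadratic model along the arc: step @ H @ step, sd @ H @ sd,
+            # and step @ H @ sd.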
+ hess_step = hess_prod(step) + hess_sd = hess_prod(sd) + curv_step = step @ hess_step + curv_sd = sd @ hess_sd + curv_step_sd = step @ hess_sd + + # For a range of equally spaced values of tan(0.5 * theta), + # calculate the reduction in the objective function that would be + # obtained by accepting the corresponding angle. + n_samples = 20 + n_samples = int((n_samples - 3) * t_bd + 3) + t_samples = np.linspace(t_bd / n_samples, t_bd, n_samples) + sin_values = 2.0 * t_samples / (1.0 + t_samples**2.0) + all_reduct = sin_values * ( + grad_step * t_samples + - grad_sd + - t_samples * curv_step + + sin_values + * (t_samples * curv_step_sd - 0.5 * (curv_sd - curv_step)) + ) + if np.all(all_reduct <= 0.0): + # No reduction in the objective function is obtained. + break + + # Accept the angle that provides the largest reduction in the + # objective function, and update the iterate. + i_max = np.argmax(all_reduct) + cos_value = (1.0 - t_samples[i_max] ** 2.0) / ( + 1.0 + t_samples[i_max] ** 2.0 + ) + step[free_bd] = ( + cos_value * step[free_bd] + sin_values[i_max] * sd[free_bd] + ) + grad += (cos_value - 1.0) * hess_step + sin_values[i_max] * hess_sd + reduct += all_reduct[i_max] + + # If the above angle is restricted by bound constraints, add them + # to the working set, and restart the alternative iteration. + # Otherwise, the calculations are terminated. + if t_bd < 1.0 and i_max == n_samples - 1: + if t_xl <= t_bd: + i_new = _argmin(all_t_xl) + step[i_new] = xl[i_new] + free_bd[i_new] = False + if t_xu <= t_bd: + i_new = _argmin(all_t_xu) + step[i_new] = xu[i_new] + free_bd[i_new] = False + else: + break + + # Ensure that the alternative iteration improves the objective + # function. + if grad_orig @ step + 0.5 * step @ hess_prod(step) > step_comparator: + step = step_base + + if debug: + assert np.all(xl <= step) + assert np.all(step <= xu) + assert np.linalg.norm(step) < 1.1 * delta + return step + + +def constrained_tangential_byrd_omojokun( + grad, + hess_prod, + xl, + xu, + aub, + bub, + aeq, + delta, + debug, + **kwargs, +): + r""" + Minimize approximately a quadratic function subject to bound and linear + constraints in a trust region. + + This function solves approximately + + .. math:: + + \min_{s \in \mathbb{R}^n} \quad g^{\mathsf{T}} s + \frac{1}{2} + s^{\mathsf{T}} H s \quad \text{s.t.} \quad + \left\{ \begin{array}{l} + l \le s \le u,\\ + A_{\scriptscriptstyle I} s \le b_{\scriptscriptstyle I},\\ + A_{\scriptscriptstyle E} s = 0,\\ + \lVert s \rVert \le \Delta, + \end{array} \right. + + using an active-set variation of the truncated conjugate gradient method. + + Parameters + ---------- + grad : `numpy.ndarray`, shape (n,) + Gradient :math:`g` as shown above. + hess_prod : callable + Product of the Hessian matrix :math:`H` with any vector. + + ``hess_prod(s) -> `numpy.ndarray`, shape (n,)`` + + returns the product :math:`H s`. + xl : `numpy.ndarray`, shape (n,) + Lower bounds :math:`l` as shown above. + xu : `numpy.ndarray`, shape (n,) + Upper bounds :math:`u` as shown above. + aub : `numpy.ndarray`, shape (m_linear_ub, n) + Coefficient matrix :math:`A_{\scriptscriptstyle I}` as shown above. + bub : `numpy.ndarray`, shape (m_linear_ub,) + Right-hand side :math:`b_{\scriptscriptstyle I}` as shown above. + aeq : `numpy.ndarray`, shape (m_linear_eq, n) + Coefficient matrix :math:`A_{\scriptscriptstyle E}` as shown above. + delta : float + Trust-region radius :math:`\Delta` as shown above. + debug : bool + Whether to make debugging tests during the execution. 
+ + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Other Parameters + ---------------- + improve_tcg : bool, optional + If True, a solution generated by the truncated conjugate gradient + method that is on the boundary of the trust region is improved by + moving around the trust-region boundary on the two-dimensional space + spanned by the solution and the gradient of the quadratic function at + the solution (default is True). + + Notes + ----- + This function implements Algorithm 6.3 of [1]_. It is assumed that the + origin is feasible with respect to the bound and linear constraints, and + that `delta` is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if debug: + assert isinstance(grad, np.ndarray) and grad.ndim == 1 + assert inspect.signature(hess_prod).bind(grad) + assert isinstance(xl, np.ndarray) and xl.shape == grad.shape + assert isinstance(xu, np.ndarray) and xu.shape == grad.shape + assert ( + isinstance(aub, np.ndarray) + and aub.ndim == 2 + and aub.shape[1] == grad.size + ) + assert ( + isinstance(bub, np.ndarray) + and bub.ndim == 1 + and bub.size == aub.shape[0] + ) + assert ( + isinstance(aeq, np.ndarray) + and aeq.ndim == 2 + and aeq.shape[1] == grad.size + ) + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.all(bub >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + bub = np.maximum(bub, 0.0) + + # Copy the arrays that may be modified by the code below. + n = grad.size + grad = np.copy(grad) + grad_orig = np.copy(grad) + + # Calculate the initial active set. + free_xl = (xl < 0.0) | (grad < 0.0) + free_xu = (xu > 0.0) | (grad > 0.0) + free_ub = (bub > 0.0) | (aub @ grad > 0.0) + n_act, q = qr_tangential_byrd_omojokun(aub, aeq, free_xl, free_xu, free_ub) + + # Set the initial iterate and the initial search direction. + step = np.zeros_like(grad) + sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad) + resid = np.copy(bub) + + k = 0 + reduct = 0.0 + boundary_reached = False + while k < n - n_act: + # Stop the computations if sd is not a descent direction. + grad_sd = grad @ sd + if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)): + break + + # Set alpha_tr to the step size for the trust-region constraint. + try: + alpha_tr = _alpha_tr(step, sd, delta) + except ZeroDivisionError: + break + + # Stop the computations if a step along sd is expected to give a + # relatively small reduction in the objective function. + if -alpha_tr * grad_sd <= 1e-8 * reduct: + break + + # Set alpha_quad to the step size for the minimization problem. + hess_sd = hess_prod(sd) + curv_sd = sd @ hess_sd + if curv_sd > TINY * abs(grad_sd): + alpha_quad = max(-grad_sd / curv_sd, 0.0) + else: + alpha_quad = np.inf + + # Stop the computations if the reduction in the objective function + # provided by an unconstrained step is small. + alpha = min(alpha_tr, alpha_quad) + if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct: + break + + # Set alpha_bd to the step size for the bound constraints. 
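+        # Only the bounds that are still free can restrict the step; the
+        # active bounds are handled by the projection onto the null space of
+        # the working set, so no movement occurs along them.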
+ i_xl = free_xl & (xl > -np.inf) & (sd < -TINY * np.abs(xl - step)) + i_xu = free_xu & (xu < np.inf) & (sd > TINY * np.abs(xu - step)) + all_alpha_xl = np.full_like(step, np.inf) + all_alpha_xu = np.full_like(step, np.inf) + all_alpha_xl[i_xl] = np.maximum( + (xl[i_xl] - step[i_xl]) / sd[i_xl], + 0.0, + ) + all_alpha_xu[i_xu] = np.maximum( + (xu[i_xu] - step[i_xu]) / sd[i_xu], + 0.0, + ) + alpha_xl = np.min(all_alpha_xl) + alpha_xu = np.min(all_alpha_xu) + alpha_bd = min(alpha_xl, alpha_xu) + + # Set alpha_ub to the step size for the linear constraints. + aub_sd = aub @ sd + i_ub = free_ub & (aub_sd > TINY * np.abs(resid)) + all_alpha_ub = np.full_like(bub, np.inf) + all_alpha_ub[i_ub] = resid[i_ub] / aub_sd[i_ub] + alpha_ub = np.min(all_alpha_ub, initial=np.inf) + + # Update the iterate. + alpha = min(alpha, alpha_bd, alpha_ub) + if alpha > 0.0: + step = np.clip(step + alpha * sd, xl, xu) + grad += alpha * hess_sd + resid = np.maximum(0.0, resid - alpha * aub_sd) + reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd) + + if alpha < min(alpha_tr, alpha_bd, alpha_ub): + # The current iteration is a conjugate gradient iteration. Update + # the search direction so that it is conjugate (with respect to H) + # to all the previous search directions. + grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad) + beta = (grad_proj @ hess_sd) / curv_sd + sd = beta * sd - grad_proj + k += 1 + elif alpha < alpha_tr: + # The iterate is restricted by a bound/linear constraint. Add this + # constraint to the active set, and restart the calculations. + if alpha_xl <= alpha: + i_new = np.argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_xl[i_new] = False + elif alpha_xu <= alpha: + i_new = np.argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_xu[i_new] = False + else: + i_new = np.argmin(all_alpha_ub) + free_ub[i_new] = False + n_act, q = qr_tangential_byrd_omojokun( + aub, + aeq, + free_xl, + free_xu, + free_ub, + ) + sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad) + k = 0 + else: + # The current iterate is on the trust-region boundary. Add all the + # active bound/linear constraints to the working set to prepare for + # the improvement of the solution, and stop the iterations. + if alpha_xl <= alpha: + i_new = _argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_xl[i_new] = False + if alpha_xu <= alpha: + i_new = _argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_xu[i_new] = False + if alpha_ub <= alpha: + i_new = _argmin(all_alpha_ub) + free_ub[i_new] = False + n_act, q = qr_tangential_byrd_omojokun( + aub, + aeq, + free_xl, + free_xu, + free_ub, + ) + boundary_reached = True + break + + # Attempt to improve the solution on the trust-region boundary. + if kwargs.get("improve_tcg", True) and boundary_reached and n_act < n: + step_base = np.copy(step) + while n_act < n: + # Check whether a substantial reduction in the objective function + # is possible, and set the search direction. + step_proj = q[:, n_act:] @ (q[:, n_act:].T @ step) + grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad) + step_sq = step_proj @ step_proj + grad_sq = grad_proj @ grad_proj + grad_step = grad_proj @ step_proj + grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0)) + sd = q[:, n_act:] @ ( + q[:, n_act:].T @ (grad_step * step - step_sq * grad) + ) + if grad_sd >= -1e-8 * reduct or np.any( + grad_sd >= -TINY * np.abs(sd) + ): + break + sd /= -grad_sd + + # Calculate an upper bound for the tangent of half the angle theta + # of this alternative iteration for the bound constraints. 
The step + # will be updated as: + # step += (cos(theta) - 1) * step_proj + sin(theta) * sd. + temp_xl = np.zeros(n) + temp_xu = np.zeros(n) + dist_xl = np.maximum(step - xl, 0.0) + dist_xu = np.maximum(xu - step, 0.0) + temp_xl[free_xl] = sd[free_xl] ** 2.0 - dist_xl[free_xl] * ( + dist_xl[free_xl] - 2.0 * step_proj[free_xl] + ) + temp_xu[free_xu] = sd[free_xu] ** 2.0 - dist_xu[free_xu] * ( + dist_xu[free_xu] + 2.0 * step_proj[free_xu] + ) + temp_xl[temp_xl > 0.0] = ( + np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0] + ) + temp_xu[temp_xu > 0.0] = ( + np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0] + ) + i_xl = temp_xl > TINY * dist_xl + i_xu = temp_xu > TINY * dist_xu + all_t_xl = np.ones(n) + all_t_xu = np.ones(n) + all_t_xl[i_xl] = np.minimum( + all_t_xl[i_xl], + dist_xl[i_xl] / temp_xl[i_xl], + ) + all_t_xu[i_xu] = np.minimum( + all_t_xu[i_xu], + dist_xu[i_xu] / temp_xu[i_xu], + ) + t_xl = np.min(all_t_xl) + t_xu = np.min(all_t_xu) + t_bd = min(t_xl, t_xu) + + # Calculate an upper bound for the tangent of half the angle theta + # of this alternative iteration for the linear constraints. + temp_ub = np.zeros_like(resid) + aub_step = aub @ step_proj + aub_sd = aub @ sd + temp_ub[free_ub] = aub_sd[free_ub] ** 2.0 - resid[free_ub] * ( + resid[free_ub] + 2.0 * aub_step[free_ub] + ) + temp_ub[temp_ub > 0.0] = ( + np.sqrt(temp_ub[temp_ub > 0.0]) + aub_sd[temp_ub > 0.0] + ) + i_ub = temp_ub > TINY * resid + all_t_ub = np.ones_like(resid) + all_t_ub[i_ub] = np.minimum( + all_t_ub[i_ub], + resid[i_ub] / temp_ub[i_ub], + ) + t_ub = np.min(all_t_ub, initial=1.0) + t_min = min(t_bd, t_ub) + + # Calculate some curvature information. + hess_step = hess_prod(step_proj) + hess_sd = hess_prod(sd) + curv_step = step_proj @ hess_step + curv_sd = sd @ hess_sd + curv_step_sd = step_proj @ hess_sd + + # For a range of equally spaced values of tan(0.5 * theta), + # calculate the reduction in the objective function that would be + # obtained by accepting the corresponding angle. + n_samples = 20 + n_samples = int((n_samples - 3) * t_min + 3) + t_samples = np.linspace(t_min / n_samples, t_min, n_samples) + sin_values = 2.0 * t_samples / (1.0 + t_samples**2.0) + all_reduct = sin_values * ( + grad_step * t_samples + - grad_sd + - sin_values + * ( + 0.5 * t_samples**2.0 * curv_step + - 2.0 * t_samples * curv_step_sd + + 0.5 * curv_sd + ) + ) + if np.all(all_reduct <= 0.0): + # No reduction in the objective function is obtained. + break + + # Accept the angle that provides the largest reduction in the + # objective function, and update the iterate. + i_max = np.argmax(all_reduct) + cos_value = (1.0 - t_samples[i_max] ** 2.0) / ( + 1.0 + t_samples[i_max] ** 2.0 + ) + step = np.clip( + step + (cos_value - 1.0) * step_proj + sin_values[i_max] * sd, + xl, + xu, + ) + grad += (cos_value - 1.0) * hess_step + sin_values[i_max] * hess_sd + resid = np.maximum( + 0.0, + resid + - (cos_value - 1.0) * aub_step + - sin_values[i_max] * aub_sd, + ) + reduct += all_reduct[i_max] + + # If the above angle is restricted by bound constraints, add them + # to the working set, and restart the alternative iteration. + # Otherwise, the calculations are terminated. 
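+            # The last sample can attain the restriction t_min only when
+            # t_min < 1, i.e., when a bound or linear constraint cut the arc
+            # short.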
+            if t_min < 1.0 and i_max == n_samples - 1:
+                if t_xl <= t_min:
+                    i_new = _argmin(all_t_xl)
+                    step[i_new] = xl[i_new]
+                    free_xl[i_new] = False
+                if t_xu <= t_min:
+                    i_new = _argmin(all_t_xu)
+                    step[i_new] = xu[i_new]
+                    free_xu[i_new] = False
+                if t_ub <= t_min:
+                    i_new = _argmin(all_t_ub)
+                    free_ub[i_new] = False
+                n_act, q = qr_tangential_byrd_omojokun(
+                    aub,
+                    aeq,
+                    free_xl,
+                    free_xu,
+                    free_ub,
+                )
+            else:
+                break
+
+        # Ensure that the alternative iteration improves the objective
+        # function.
+        if grad_orig @ step + 0.5 * step @ hess_prod(
+            step
+        ) > grad_orig @ step_base + 0.5 * step_base @ hess_prod(step_base):
+            step = step_base
+
+    if debug:
+        tol = get_arrays_tol(xl, xu)
+        assert np.all(xl <= step)
+        assert np.all(step <= xu)
+        assert np.all(aub @ step <= bub + tol)
+        assert np.all(np.abs(aeq @ step) <= tol)
+        assert np.linalg.norm(step) < 1.1 * delta
+    return step
+
+
+def normal_byrd_omojokun(aub, bub, aeq, beq, xl, xu, delta, debug, **kwargs):
+    r"""
+    Minimize approximately a linear constraint violation subject to bound
+    constraints in a trust region.
+
+    This function solves approximately
+
+    .. math::
+
+        \min_{s \in \mathbb{R}^n} \quad \frac{1}{2} \big( \lVert \max \{
+        A_{\scriptscriptstyle I} s - b_{\scriptscriptstyle I}, 0 \} \rVert^2 +
+        \lVert A_{\scriptscriptstyle E} s - b_{\scriptscriptstyle E} \rVert^2
+        \big) \quad \text{s.t.} \quad
+        \left\{ \begin{array}{l}
+            l \le s \le u,\\
+            \lVert s \rVert \le \Delta,
+        \end{array} \right.
+
+    using a variation of the truncated conjugate gradient method.
+
+    Parameters
+    ----------
+    aub : `numpy.ndarray`, shape (m_linear_ub, n)
+        Matrix :math:`A_{\scriptscriptstyle I}` as shown above.
+    bub : `numpy.ndarray`, shape (m_linear_ub,)
+        Vector :math:`b_{\scriptscriptstyle I}` as shown above.
+    aeq : `numpy.ndarray`, shape (m_linear_eq, n)
+        Matrix :math:`A_{\scriptscriptstyle E}` as shown above.
+    beq : `numpy.ndarray`, shape (m_linear_eq,)
+        Vector :math:`b_{\scriptscriptstyle E}` as shown above.
+    xl : `numpy.ndarray`, shape (n,)
+        Lower bounds :math:`l` as shown above.
+    xu : `numpy.ndarray`, shape (n,)
+        Upper bounds :math:`u` as shown above.
+    delta : float
+        Trust-region radius :math:`\Delta` as shown above.
+    debug : bool
+        Whether to make debugging tests during the execution.
+
+    Returns
+    -------
+    `numpy.ndarray`, shape (n,)
+        Approximate solution :math:`s`.
+
+    Other Parameters
+    ----------------
+    improve_tcg : bool, optional
+        If True, a solution generated by the truncated conjugate gradient
+        method that is on the boundary of the trust region is improved by
+        moving around the trust-region boundary on the two-dimensional space
+        spanned by the solution and the gradient of the quadratic function at
+        the solution (default is True).
+
+    Notes
+    -----
+    This function implements Algorithm 6.4 of [1]_. It is assumed that the
+    origin is feasible with respect to the bound constraints and that `delta`
+    is finite and positive.
+
+    References
+    ----------
+    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
+       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
+       Kong Polytechnic University, Hong Kong, China, 2022. URL:
+       https://theses.lib.polyu.edu.hk/handle/200/12294.
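+
+    Examples
+    --------
+    A small illustrative call on a two-variable problem (the data below are
+    arbitrary and only chosen to satisfy the feasibility assumptions):
+
+    >>> import numpy as np
+    >>> aub = np.array([[1.0, 1.0]])
+    >>> bub = np.array([1.0])
+    >>> aeq = np.array([[1.0, -1.0]])
+    >>> beq = np.array([0.5])
+    >>> xl = np.array([-2.0, -2.0])
+    >>> xu = np.array([2.0, 2.0])
+    >>> step = normal_byrd_omojokun(aub, bub, aeq, beq, xl, xu, 1.0, False)
+    >>> step.shape
+    (2,)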
+ """ + if debug: + assert isinstance(aub, np.ndarray) and aub.ndim == 2 + assert ( + isinstance(bub, np.ndarray) + and bub.ndim == 1 + and bub.size == aub.shape[0] + ) + assert ( + isinstance(aeq, np.ndarray) + and aeq.ndim == 2 + and aeq.shape[1] == aub.shape[1] + ) + assert ( + isinstance(beq, np.ndarray) + and beq.ndim == 1 + and beq.size == aeq.shape[0] + ) + assert isinstance(xl, np.ndarray) and xl.shape == (aub.shape[1],) + assert isinstance(xu, np.ndarray) and xu.shape == (aub.shape[1],) + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # Calculate the initial active set. + m_linear_ub, n = aub.shape + grad = np.r_[aeq.T @ -beq, np.maximum(0.0, -bub)] + free_xl = (xl < 0.0) | (grad[:n] < 0.0) + free_xu = (xu > 0.0) | (grad[:n] > 0.0) + free_slack = bub < 0.0 + free_ub = (bub > 0.0) | (aub @ grad[:n] - grad[n:] > 0.0) + n_act, q = qr_normal_byrd_omojokun( + aub, + free_xl, + free_xu, + free_slack, + free_ub, + ) + + # Calculate an upper bound on the norm of the slack variables. It is not + # used in the original algorithm, but it may prevent undesired behaviors + # engendered by computer rounding errors. + delta_slack = np.sqrt(beq @ beq + grad[n:] @ grad[n:]) + + # Set the initial iterate and the initial search direction. + step = np.zeros(n) + sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad) + resid = bub + grad[n:] + + k = 0 + reduct = 0.0 + boundary_reached = False + while k < n + m_linear_ub - n_act: + # Stop the computations if sd is not a descent direction. + grad_sd = grad @ sd + if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)): + break + + # Set alpha_tr to the step size for the trust-region constraint. + try: + alpha_tr = _alpha_tr(step, sd[:n], delta) + except ZeroDivisionError: + alpha_tr = np.inf + + # Prevent undesired behaviors engendered by computer rounding errors by + # considering the trust-region constraint on the slack variables. + try: + alpha_tr = min(alpha_tr, _alpha_tr(grad[n:], sd[n:], delta_slack)) + except ZeroDivisionError: + pass + + # Stop the computations if a step along sd is expected to give a + # relatively small reduction in the objective function. + if -alpha_tr * grad_sd <= 1e-8 * reduct: + break + + # Set alpha_quad to the step size for the minimization problem. + hess_sd = np.r_[aeq.T @ (aeq @ sd[:n]), sd[n:]] + curv_sd = sd @ hess_sd + if curv_sd > TINY * abs(grad_sd): + alpha_quad = max(-grad_sd / curv_sd, 0.0) + else: + alpha_quad = np.inf + + # Stop the computations if the reduction in the objective function + # provided by an unconstrained step is small. + alpha = min(alpha_tr, alpha_quad) + if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct: + break + + # Set alpha_bd to the step size for the bound constraints. 
+ i_xl = free_xl & (xl > -np.inf) & (sd[:n] < -TINY * np.abs(xl - step)) + i_xu = free_xu & (xu < np.inf) & (sd[:n] > TINY * np.abs(xu - step)) + i_slack = free_slack & (sd[n:] < -TINY * np.abs(grad[n:])) + all_alpha_xl = np.full_like(step, np.inf) + all_alpha_xu = np.full_like(step, np.inf) + all_alpha_slack = np.full_like(bub, np.inf) + all_alpha_xl[i_xl] = np.maximum( + (xl[i_xl] - step[i_xl]) / sd[:n][i_xl], + 0.0, + ) + all_alpha_xu[i_xu] = np.maximum( + (xu[i_xu] - step[i_xu]) / sd[:n][i_xu], + 0.0, + ) + all_alpha_slack[i_slack] = np.maximum( + -grad[n:][i_slack] / sd[n:][i_slack], + 0.0, + ) + alpha_xl = np.min(all_alpha_xl) + alpha_xu = np.min(all_alpha_xu) + alpha_slack = np.min(all_alpha_slack, initial=np.inf) + alpha_bd = min(alpha_xl, alpha_xu, alpha_slack) + + # Set alpha_ub to the step size for the linear constraints. + aub_sd = aub @ sd[:n] - sd[n:] + i_ub = free_ub & (aub_sd > TINY * np.abs(resid)) + all_alpha_ub = np.full_like(bub, np.inf) + all_alpha_ub[i_ub] = resid[i_ub] / aub_sd[i_ub] + alpha_ub = np.min(all_alpha_ub, initial=np.inf) + + # Update the iterate. + alpha = min(alpha, alpha_bd, alpha_ub) + if alpha > 0.0: + step = np.clip(step + alpha * sd[:n], xl, xu) + grad += alpha * hess_sd + resid = np.maximum(0.0, resid - alpha * aub_sd) + reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd) + + if alpha < min(alpha_tr, alpha_bd, alpha_ub): + # The current iteration is a conjugate gradient iteration. Update + # the search direction so that it is conjugate (with respect to H) + # to all the previous search directions. + grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad) + beta = (grad_proj @ hess_sd) / curv_sd + sd = beta * sd - grad_proj + k += 1 + elif alpha < alpha_tr: + # The iterate is restricted by a bound/linear constraint. Add this + # constraint to the active set, and restart the calculations. + if alpha_xl <= alpha: + i_new = np.argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_xl[i_new] = False + elif alpha_xu <= alpha: + i_new = np.argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_xu[i_new] = False + elif alpha_slack <= alpha: + i_new = np.argmin(all_alpha_slack) + free_slack[i_new] = False + else: + i_new = np.argmin(all_alpha_ub) + free_ub[i_new] = False + n_act, q = qr_normal_byrd_omojokun( + aub, free_xl, free_xu, free_slack, free_ub + ) + sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad) + k = 0 + else: + # The current iterate is on the trust-region boundary. Add all the + # active bound constraints to the working set to prepare for the + # improvement of the solution, and stop the iterations. + if alpha_xl <= alpha: + i_new = _argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_xl[i_new] = False + if alpha_xu <= alpha: + i_new = _argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_xu[i_new] = False + boundary_reached = True + break + + # Attempt to improve the solution on the trust-region boundary. + if kwargs.get("improve_tcg", True) and boundary_reached: + step_base = np.copy(step) + free_bd = free_xl & free_xu + grad = aub.T @ np.maximum(aub @ step - bub, 0.0) + aeq.T @ ( + aeq @ step - beq + ) + sd = np.zeros(n) + while np.count_nonzero(free_bd) > 0: + # Check whether a substantial reduction in the objective function + # is possible, and set the search direction. 
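+ # (The search below works in the 2-D plane spanned by the current step + # and the gradient, restricted to the still-free bound components: sd is + # built orthogonal to step in that plane and scaled to the same norm, so + # the update cos(theta) * step + sin(theta) * sd rotates the step along + # the trust-region boundary, up to the clipping to the bounds applied + # afterwards.)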
+ step_sq = step[free_bd] @ step[free_bd] + grad_sq = grad[free_bd] @ grad[free_bd] + grad_step = grad[free_bd] @ step[free_bd] + grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0)) + sd[free_bd] = grad_step * step[free_bd] - step_sq * grad[free_bd] + sd[~free_bd] = 0.0 + if grad_sd >= -1e-8 * reduct or np.any( + grad_sd >= -TINY * np.abs(sd[free_bd]) + ): + break + sd[free_bd] /= -grad_sd + + # Calculate an upper bound for the tangent of half the angle theta + # of this alternative iteration. The step will be updated as: + # step = cos(theta) * step + sin(theta) * sd. + temp_xl = np.zeros(n) + temp_xu = np.zeros(n) + temp_xl[free_bd] = ( + step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xl[free_bd] ** 2.0 + ) + temp_xu[free_bd] = ( + step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xu[free_bd] ** 2.0 + ) + temp_xl[temp_xl > 0.0] = ( + np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0] + ) + temp_xu[temp_xu > 0.0] = ( + np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0] + ) + dist_xl = np.maximum(step - xl, 0.0) + dist_xu = np.maximum(xu - step, 0.0) + i_xl = temp_xl > TINY * dist_xl + i_xu = temp_xu > TINY * dist_xu + all_t_xl = np.ones(n) + all_t_xu = np.ones(n) + all_t_xl[i_xl] = np.minimum( + all_t_xl[i_xl], + dist_xl[i_xl] / temp_xl[i_xl], + ) + all_t_xu[i_xu] = np.minimum( + all_t_xu[i_xu], + dist_xu[i_xu] / temp_xu[i_xu], + ) + t_xl = np.min(all_t_xl) + t_xu = np.min(all_t_xu) + t_bd = min(t_xl, t_xu) + + # For a range of equally spaced values of tan(0.5 * theta), + # calculate the reduction in the objective function that would be + # obtained by accepting the corresponding angle. + n_samples = 20 + n_samples = int((n_samples - 3) * t_bd + 3) + t_samples = np.linspace(t_bd / n_samples, t_bd, n_samples) + resid_ub = np.maximum(aub @ step - bub, 0.0) + resid_eq = aeq @ step - beq + step_proj = np.copy(step) + step_proj[~free_bd] = 0.0 + all_reduct = np.empty(n_samples) + for i in range(n_samples): + sin_value = 2.0 * t_samples[i] / (1.0 + t_samples[i] ** 2.0) + step_alt = np.clip( + step + sin_value * (sd - t_samples[i] * step_proj), + xl, + xu, + ) + resid_ub_alt = np.maximum(aub @ step_alt - bub, 0.0) + resid_eq_alt = aeq @ step_alt - beq + all_reduct[i] = 0.5 * ( + resid_ub @ resid_ub + + resid_eq @ resid_eq + - resid_ub_alt @ resid_ub_alt + - resid_eq_alt @ resid_eq_alt + ) + if np.all(all_reduct <= 0.0): + # No reduction in the objective function is obtained. + break + + # Accept the angle that provides the largest reduction in the + # objective function, and update the iterate. + i_max = np.argmax(all_reduct) + cos_value = (1.0 - t_samples[i_max] ** 2.0) / ( + 1.0 + t_samples[i_max] ** 2.0 + ) + sin_value = (2.0 * t_samples[i_max] + / (1.0 + t_samples[i_max] ** 2.0)) + step[free_bd] = cos_value * step[free_bd] + sin_value * sd[free_bd] + grad = aub.T @ np.maximum(aub @ step - bub, 0.0) + aeq.T @ ( + aeq @ step - beq + ) + reduct += all_reduct[i_max] + + # If the above angle is restricted by bound constraints, add them + # to the working set, and restart the alternative iteration. + # Otherwise, the calculations are terminated. + if t_bd < 1.0 and i_max == n_samples - 1: + if t_xl <= t_bd: + i_new = _argmin(all_t_xl) + step[i_new] = xl[i_new] + free_bd[i_new] = False + if t_xu <= t_bd: + i_new = _argmin(all_t_xu) + step[i_new] = xu[i_new] + free_bd[i_new] = False + else: + break + + # Ensure that the alternative iteration improves the objective + # function. 
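+ # (The comparison below evaluates the squared constraint residuals of + # the improved step and of the saved step_base, and restores step_base + # whenever the boundary search increased the violation.)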
+ resid_ub = np.maximum(aub @ step - bub, 0.0) + resid_ub_base = np.maximum(aub @ step_base - bub, 0.0) + resid_eq = aeq @ step - beq + resid_eq_base = aeq @ step_base - beq + if ( + resid_ub @ resid_ub + resid_eq @ resid_eq + > resid_ub_base @ resid_ub_base + resid_eq_base @ resid_eq_base + ): + step = step_base + + if debug: + assert np.all(xl <= step) + assert np.all(step <= xu) + assert np.linalg.norm(step) < 1.1 * delta + return step + + +def qr_tangential_byrd_omojokun(aub, aeq, free_xl, free_xu, free_ub): + n = free_xl.size + identity = np.eye(n) + q, r, _ = qr( + np.block( + [ + [aeq], + [aub[~free_ub, :]], + [-identity[~free_xl, :]], + [identity[~free_xu, :]], + ] + ).T, + pivoting=True, + ) + n_act = np.count_nonzero( + np.abs(np.diag(r)) + >= 10.0 + * EPS + * n + * np.linalg.norm(r[: np.min(r.shape), : np.min(r.shape)], axis=0) + ) + return n_act, q + + +def qr_normal_byrd_omojokun(aub, free_xl, free_xu, free_slack, free_ub): + m_linear_ub, n = aub.shape + identity_n = np.eye(n) + identity_m = np.eye(m_linear_ub) + q, r, _ = qr( + np.block( + [ + [ + aub[~free_ub, :], + -identity_m[~free_ub, :], + ], + [ + np.zeros((m_linear_ub - np.count_nonzero(free_slack), n)), + -identity_m[~free_slack, :], + ], + [ + -identity_n[~free_xl, :], + np.zeros((n - np.count_nonzero(free_xl), m_linear_ub)), + ], + [ + identity_n[~free_xu, :], + np.zeros((n - np.count_nonzero(free_xu), m_linear_ub)), + ], + ] + ).T, + pivoting=True, + ) + n_act = np.count_nonzero( + np.abs(np.diag(r)) + >= 10.0 + * EPS + * (n + m_linear_ub) + * np.linalg.norm(r[: np.min(r.shape), : np.min(r.shape)], axis=0) + ) + return n_act, q + + +def _alpha_tr(step, sd, delta): + step_sd = step @ sd + sd_sq = sd @ sd + dist_tr_sq = delta**2.0 - step @ step + temp = np.sqrt(max(step_sd**2.0 + sd_sq * dist_tr_sq, 0.0)) + if step_sd <= 0.0 and sd_sq > TINY * abs(temp - step_sd): + alpha_tr = max((temp - step_sd) / sd_sq, 0.0) + elif abs(temp + step_sd) > TINY * dist_tr_sq: + alpha_tr = max(dist_tr_sq / (temp + step_sd), 0.0) + else: + raise ZeroDivisionError + return alpha_tr + + +def _argmax(x): + return np.flatnonzero(x >= np.max(x)) + + +def _argmin(x): + return np.flatnonzero(x <= np.min(x)) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6b4841ddff3a04bda5cbff744e30681b6963b9 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py @@ -0,0 +1,18 @@ +from .exceptions import ( + MaxEvalError, + TargetSuccess, + CallbackSuccess, + FeasibleSuccess, +) +from .math import get_arrays_tol, exact_1d_array +from .versions import show_versions + +__all__ = [ + "MaxEvalError", + "TargetSuccess", + "CallbackSuccess", + "FeasibleSuccess", + "get_arrays_tol", + "exact_1d_array", + "show_versions", +] diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69c53a809aec86433587b767b38d367891017f12 Binary files /dev/null and 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a149340b6aff6c93a410e52218c69ffb201c3d2b Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26c9793d3a399f7083204034fe18b5ffefa4d11d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9183a0fc4bce223cf64cb121e2ba61b4c5b7e36d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..c85094894f378a8e3934ad109ea6166e33e4366b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py @@ -0,0 +1,22 @@ +class MaxEvalError(Exception): + """ + Exception raised when the maximum number of evaluations is reached. + """ + + +class TargetSuccess(Exception): + """ + Exception raised when the target value is reached. + """ + + +class CallbackSuccess(StopIteration): + """ + Exception raised when the callback function raises a ``StopIteration``. + """ + + +class FeasibleSuccess(Exception): + """ + Exception raised when a feasible point of a feasible problem is found. + """ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py new file mode 100644 index 0000000000000000000000000000000000000000..1b16ae98a0df38752815f5a69d56da20f856f9f9 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py @@ -0,0 +1,77 @@ +import numpy as np + + +EPS = np.finfo(float).eps + + +def get_arrays_tol(*arrays): + """ + Get a relative tolerance for a set of arrays. 
+ + Parameters + ---------- + *arrays: tuple + Set of `numpy.ndarray` to get the tolerance for. + + Returns + ------- + float + Relative tolerance for the set of arrays. + + Raises + ------ + ValueError + If no array is provided. + """ + if len(arrays) == 0: + raise ValueError("At least one array must be provided.") + size = max(array.size for array in arrays) + weight = max( + np.max(np.abs(array[np.isfinite(array)]), initial=1.0) + for array in arrays + ) + return 10.0 * EPS * max(size, 1.0) * weight + + +def exact_1d_array(x, message): + """ + Preprocess a 1-dimensional array. + + Parameters + ---------- + x : array_like + Array to be preprocessed. + message : str + Error message if `x` cannot be interpreted as a 1-dimensional array. + + Returns + ------- + `numpy.ndarray` + Preprocessed array. + """ + x = np.atleast_1d(np.squeeze(x)).astype(float) + if x.ndim != 1: + raise ValueError(message) + return x + + +def exact_2d_array(x, message): + """ + Preprocess a 2-dimensional array. + + Parameters + ---------- + x : array_like + Array to be preprocessed. + message : str + Error message if `x` cannot be interpreted as a 2-dimensional array. + + Returns + ------- + `numpy.ndarray` + Preprocessed array. + """ + x = np.atleast_2d(x).astype(float) + if x.ndim != 2: + raise ValueError(message) + return x diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..94a0f8f5cef626354f40901cbe06a84287291c1c --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py @@ -0,0 +1,67 @@ +import os +import platform +import sys +from importlib.metadata import PackageNotFoundError, version + + +def _get_sys_info(): + """ + Get useful system information. + + Returns + ------- + dict + Useful system information. + """ + return { + "python": sys.version.replace(os.linesep, " "), + "executable": sys.executable, + "machine": platform.platform(), + } + + +def _get_deps_info(): + """ + Get the versions of the dependencies. + + Returns + ------- + dict + Versions of the dependencies. + """ + deps = ["cobyqa", "numpy", "scipy", "setuptools", "pip"] + deps_info = {} + for module in deps: + try: + deps_info[module] = version(module) + except PackageNotFoundError: + deps_info[module] = None + return deps_info + + +def show_versions(): + """ + Display useful system and dependencies information. + + When reporting issues, please include this information.
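For example, assuming the vendored copy is importable under the path used in this diff, the intended call is simply::

    from scipy._lib.cobyqa.utils import show_versions

    show_versions()  # prints the "System settings" and "Python dependencies" blocks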
+ """ + print("System settings") + print("---------------") + sys_info = _get_sys_info() + print( + "\n".join( + f"{k:>{max(map(len, sys_info.keys())) + 1}}: {v}" + for k, v in sys_info.items() + ) + ) + + print() + print("Python dependencies") + print("-------------------") + deps_info = _get_deps_info() + print( + "\n".join( + f"{k:>{max(map(len, deps_info.keys())) + 1}}: {v}" + for k, v in deps_info.items() + ) + ) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c545a00b9fd63427088ac873fa3fa65678b77f71 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__init__.py @@ -0,0 +1,114 @@ +""" +============================================== +Discrete Fourier transforms (:mod:`scipy.fft`) +============================================== + +.. currentmodule:: scipy.fft + +Fast Fourier Transforms (FFTs) +============================== + +.. autosummary:: + :toctree: generated/ + + fft - Fast (discrete) Fourier Transform (FFT) + ifft - Inverse FFT + fft2 - 2-D FFT + ifft2 - 2-D inverse FFT + fftn - N-D FFT + ifftn - N-D inverse FFT + rfft - FFT of strictly real-valued sequence + irfft - Inverse of rfft + rfft2 - 2-D FFT of real sequence + irfft2 - Inverse of rfft2 + rfftn - N-D FFT of real sequence + irfftn - Inverse of rfftn + hfft - FFT of a Hermitian sequence (real spectrum) + ihfft - Inverse of hfft + hfft2 - 2-D FFT of a Hermitian sequence + ihfft2 - Inverse of hfft2 + hfftn - N-D FFT of a Hermitian sequence + ihfftn - Inverse of hfftn + +Discrete Sin and Cosine Transforms (DST and DCT) +================================================ + +.. autosummary:: + :toctree: generated/ + + dct - Discrete cosine transform + idct - Inverse discrete cosine transform + dctn - N-D Discrete cosine transform + idctn - N-D Inverse discrete cosine transform + dst - Discrete sine transform + idst - Inverse discrete sine transform + dstn - N-D Discrete sine transform + idstn - N-D Inverse discrete sine transform + +Fast Hankel Transforms +====================== + +.. autosummary:: + :toctree: generated/ + + fht - Fast Hankel transform + ifht - Inverse of fht + +Helper functions +================ + +.. autosummary:: + :toctree: generated/ + + fftshift - Shift the zero-frequency component to the center of the spectrum + ifftshift - The inverse of `fftshift` + fftfreq - Return the Discrete Fourier Transform sample frequencies + rfftfreq - DFT sample frequencies (for usage with rfft, irfft) + fhtoffset - Compute an optimal offset for the Fast Hankel Transform + next_fast_len - Find the optimal length to zero-pad an FFT for speed + prev_fast_len - Find the maximum slice length that results in a fast FFT + set_workers - Context manager to set default number of workers + get_workers - Get the current default number of workers + +Backend control +=============== + +.. 
autosummary:: + :toctree: generated/ + + set_backend - Context manager to set the backend within a fixed scope + skip_backend - Context manager to skip a backend within a fixed scope + set_global_backend - Sets the global fft backend + register_backend - Register a backend for permanent use + +""" + +from ._basic import ( + fft, ifft, fft2, ifft2, fftn, ifftn, + rfft, irfft, rfft2, irfft2, rfftn, irfftn, + hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn) +from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn +from ._fftlog import fht, ifht, fhtoffset +from ._helper import ( + next_fast_len, prev_fast_len, fftfreq, + rfftfreq, fftshift, ifftshift) +from ._backend import (set_backend, skip_backend, set_global_backend, + register_backend) +from ._pocketfft.helper import set_workers, get_workers + +__all__ = [ + 'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn', + 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn', + 'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn', + 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift', + 'next_fast_len', 'prev_fast_len', + 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn', + 'fht', 'ifht', + 'fhtoffset', + 'set_backend', 'skip_backend', 'set_global_backend', 'register_backend', + 'get_workers', 'set_workers'] + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..637e29dd3b884e5fab0092abbd5c8cd74f6ac7f3 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..266409f0e90a7e08d2668ca5b3c91ed50b558d39 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_backend.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8798a13071a06c5c6737adaf144f2e314f2959c3 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f5d888f558d0360f0aa243efd7e3ad0d4b34052 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_basic_backend.cpython-310.pyc differ diff --git 
a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_debug_backends.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_debug_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27d098aee3e2682c7fa74e609284e125eaadf25f Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_debug_backends.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd04a07ab469b383ab5fb068972c0b1818d1c2e1 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd2fe1dd2b63affe93bae3057994eaba55bf7076 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_helper.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75be6e66fad839799245de870148d0018e54131f Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_helper.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b8745933bf5ea24f1bef6831324f007a1751e1d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31d5b199d6b049b2b0a1d5b85528ccc2c433d3fa Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_backend.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_backend.py new file mode 100644 index 
0000000000000000000000000000000000000000..c1e5cfcad5c4cbc43276e151d2da33039368630d --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_backend.py @@ -0,0 +1,196 @@ +import scipy._lib.uarray as ua +from . import _basic_backend +from . import _realtransforms_backend +from . import _fftlog_backend + + +class _ScipyBackend: + """The default backend for fft calculations + + Notes + ----- + We use the domain ``numpy.scipy`` rather than ``scipy`` because ``uarray`` + treats the domain as a hierarchy. This means the user can install a single + backend for ``numpy`` and have it implement ``numpy.scipy.fft`` as well. + """ + __ua_domain__ = "numpy.scipy.fft" + + @staticmethod + def __ua_function__(method, args, kwargs): + + fn = getattr(_basic_backend, method.__name__, None) + if fn is None: + fn = getattr(_realtransforms_backend, method.__name__, None) + if fn is None: + fn = getattr(_fftlog_backend, method.__name__, None) + if fn is None: + return NotImplemented + return fn(*args, **kwargs) + + +_named_backends = { + 'scipy': _ScipyBackend, +} + + +def _backend_from_arg(backend): + """Maps strings to known backends and validates the backend""" + + if isinstance(backend, str): + try: + backend = _named_backends[backend] + except KeyError as e: + raise ValueError(f'Unknown backend {backend}') from e + + if backend.__ua_domain__ != 'numpy.scipy.fft': + raise ValueError('Backend does not implement "numpy.scipy.fft"') + + return backend + + +def set_global_backend(backend, coerce=False, only=False, try_last=False): + """Sets the global fft backend + + This utility method replaces the default backend for permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. This will be the first tried + backend outside the :obj:`set_backend` context manager. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to use. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + coerce : bool + Whether to coerce input types when trying this backend. + only : bool + If ``True``, no more backends will be tried if this fails. + Implied by ``coerce=True``. + try_last : bool + If ``True``, the global backend is tried after registered backends. + + Raises + ------ + ValueError: If the backend does not implement ``numpy.scipy.fft``. + + Notes + ----- + This will overwrite the previously set global backend, which, by default, is + the SciPy implementation. + + Examples + -------- + We can set the global fft backend: + + >>> from scipy.fft import fft, set_global_backend + >>> set_global_backend("scipy") # Sets global backend (default is "scipy"). + >>> fft([1]) # Calls the global backend + array([1.+0.j]) + """ + backend = _backend_from_arg(backend) + ua.set_global_backend(backend, coerce=coerce, only=only, try_last=try_last) + + +def register_backend(backend): + """ + Register a backend for permanent use. + + Registered backends have the lowest priority and will be tried after the + global backend. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to use. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + + Raises + ------ + ValueError: If the backend does not implement ``numpy.scipy.fft``. 
+ + Examples + -------- + We can register a new fft backend: + + >>> from scipy.fft import fft, register_backend, set_global_backend + >>> class NoopBackend: # Define an invalid Backend + ... __ua_domain__ = "numpy.scipy.fft" + ... def __ua_function__(self, func, args, kwargs): + ... return NotImplemented + >>> set_global_backend(NoopBackend()) # Set the invalid backend as global + >>> register_backend("scipy") # Register a new backend + >>> fft([1]) # The registered backend is called because + ... # the global backend returns `NotImplemented` + array([1.+0.j]) + >>> set_global_backend("scipy") # Restore global backend to default + + """ + backend = _backend_from_arg(backend) + ua.register_backend(backend) + + +def set_backend(backend, coerce=False, only=False): + """Context manager to set the backend within a fixed scope. + + Upon entering the ``with`` statement, the given backend will be added to + the list of available backends with the highest priority. Upon exit, the + backend is reset to the state before entering the scope. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to use. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + coerce : bool, optional + Whether to allow expensive conversions for the ``x`` parameter, e.g., + copying a NumPy array to the GPU for a CuPy backend. Implies ``only``. + only : bool, optional + If ``only`` is ``True`` and this backend returns ``NotImplemented``, + then a BackendNotImplementedError will be raised immediately, ignoring + any lower priority backends. + + Examples + -------- + >>> import scipy.fft as fft + >>> with fft.set_backend('scipy', only=True): + ... fft.fft([1]) # Always calls the scipy implementation + array([1.+0.j]) + """ + backend = _backend_from_arg(backend) + return ua.set_backend(backend, coerce=coerce, only=only) + + +def skip_backend(backend): + """Context manager to skip a backend within a fixed scope. + + Within the context of a ``with`` statement, the given backend will not be + called. This covers backends registered both locally and globally. Upon + exit, the backend will again be considered. + + Parameters + ---------- + backend : {object, 'scipy'} + The backend to skip. + Can either be a ``str`` containing the name of a known backend + {'scipy'} or an object that implements the uarray protocol. + + Examples + -------- + >>> import scipy.fft as fft + >>> fft.fft([1]) # Calls default SciPy backend + array([1.+0.j]) + >>> with fft.skip_backend('scipy'): # We explicitly skip the SciPy backend + ... fft.fft([1]) # leaving no implementation available + Traceback (most recent call last): + ... + BackendNotImplementedError: No selected backends had an implementation ...
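Putting the protocol together, a complete (if toy) third-party backend needs only ``__ua_domain__`` and ``__ua_function__``. A hedged sketch (the class name ``NumpyFFTBackend`` is invented here; it follows the ``NoopBackend`` pattern from the ``register_backend`` example above, serving only the 1-D ``fft`` via NumPy and deferring everything else)::

    import numpy as np
    import scipy.fft

    class NumpyFFTBackend:
        __ua_domain__ = "numpy.scipy.fft"

        def __ua_function__(self, method, args, kwargs):
            # Returning NotImplemented lets uarray fall through to the
            # next backend for any multimethod other than fft.
            if method.__name__ != "fft":
                return NotImplemented
            kwargs = {k: v for k, v in kwargs.items()
                      if k in ("n", "axis", "norm")}  # keywords np.fft.fft accepts
            return np.fft.fft(*args, **kwargs)

    with scipy.fft.set_backend(NumpyFFTBackend()):
        y = scipy.fft.fft([0.0, 1.0, 0.0, 0.0])  # served by NumpyFFTBackend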
+ """ + backend = _backend_from_arg(backend) + return ua.skip_backend(backend) + + +set_global_backend('scipy', try_last=True) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_basic.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..a3fc021c9ef9b7c2a40bf7b5138158df8e276ae6 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_basic.py @@ -0,0 +1,1630 @@ +from scipy._lib.uarray import generate_multimethod, Dispatchable +import numpy as np + + +def _x_replacer(args, kwargs, dispatchables): + """ + uarray argument replacer to replace the transform input array (``x``) + """ + if len(args) > 0: + return (dispatchables[0],) + args[1:], kwargs + kw = kwargs.copy() + kw['x'] = dispatchables[0] + return args, kw + + +def _dispatch(func): + """ + Function annotation that creates a uarray multimethod from the function + """ + return generate_multimethod(func, _x_replacer, domain="numpy.scipy.fft") + + +@_dispatch +def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 1-D discrete Fourier Transform. + + This function computes the 1-D *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [1]_. + + Parameters + ---------- + x : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode. Default is "backward", meaning no normalization on + the forward transforms and scaling by ``1/n`` on the `ifft`. + "forward" instead applies the ``1/n`` factor on the forward transform. + For ``norm="ortho"``, both directions are scaled by ``1/sqrt(n)``. + + .. versionadded:: 1.6.0 + ``norm={"forward", "backward"}`` options were added + + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See the notes below for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. See below for more + details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + if `axes` is larger than the last axis of `x`. + + See Also + -------- + ifft : The inverse of `fft`. + fft2 : The 2-D FFT. + fftn : The N-D FFT. + rfftn : The N-D FFT of real input. + fftfreq : Frequency bins for given FFT parameters. + next_fast_len : Size to pad input to for most efficient transforms + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform + (DFT) can be calculated efficiently, by using symmetries in the calculated + terms. 
The symmetry is highest when `n` is a power of 2, and the transform + is therefore most efficient for these sizes. For poorly factorizable sizes, + `scipy.fft` uses Bluestein's algorithm [2]_ and so is never worse than + O(`n` log `n`). Further performance improvements may be seen by zero-padding + the input using `next_fast_len`. + + If ``x`` is a 1d array, then the `fft` is equivalent to :: + + y[k] = np.sum(x * np.exp(-2j * np.pi * k * np.arange(n)/n)) + + The frequency term ``f=k/n`` is found at ``y[k]``. At ``y[n/2]`` we reach + the Nyquist frequency and wrap around to the negative-frequency terms. So, + for an 8-point transform, the frequencies of the result are + [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the + zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3], + use `fftshift`. + + Transforms can be done in single, double, or extended precision (long + double) floating point. Half precision inputs will be converted to single + precision and non-floating-point inputs will be converted to double + precision. + + If the data type of ``x`` is real, a "real FFT" algorithm is automatically + used, which roughly halves the computation time. To increase efficiency + a little further, use `rfft`, which does the same calculation, but only + outputs half of the symmetrical spectrum. If the data are both real and + symmetrical, the `dct` can again double the efficiency, by generating + half of the spectrum from half of the signal. + + When ``overwrite_x=True`` is specified, the memory referenced by ``x`` may + be used by the implementation in any way. This may include reusing the + memory for the result, but this is in no way guaranteed. You should not + rely on the contents of ``x`` after the transform as this may change in + future without warning. + + The ``workers`` argument specifies the maximum number of parallel jobs to + split the FFT computation into. This will execute independent 1-D + FFTs within ``x``. So, ``x`` must be at least 2-D and the + non-transformed axes must be large enough to split into chunks. If ``x`` is + too small, fewer jobs may be used than requested. + + References + ---------- + .. [1] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + .. [2] Bluestein, L., 1970, "A linear filtering approach to the + computation of discrete Fourier transform". *IEEE Transactions on + Audio and Electroacoustics.* 18 (4): 451-455. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> scipy.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part: + + >>> from scipy.fft import fft, fftfreq, fftshift + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = fftshift(fft(np.sin(t))) + >>> freq = fftshift(fftfreq(t.shape[-1])) + >>> plt.plot(freq, sp.real, freq, sp.imag) + [<matplotlib.lines.Line2D object at 0x...>, + <matplotlib.lines.Line2D object at 0x...>] + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 1-D inverse discrete Fourier Transform.
+ + This function computes the inverse of the 1-D *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(x)) == x`` to within numerical accuracy. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``x[0]`` should contain the zero frequency term, + * ``x[1:n//2]`` should contain the positive-frequency terms, + * ``x[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``x[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `fft` for details. + + Parameters + ---------- + x : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axes` is larger than the last axis of `x`. + + See Also + -------- + fft : The 1-D (forward) FFT, of which `ifft` is the inverse. + ifft2 : The 2-D inverse FFT. + ifftn : The N-D inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + If ``x`` is a 1-D array, then the `ifft` is equivalent to :: + + y[k] = np.sum(x * np.exp(2j * np.pi * k * np.arange(n)/n)) / len(x) + + As with `fft`, `ifft` has support for all floating point types and is + optimized for real input. 
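The two identities quoted in the `fft` and `ifft` notes are easy to verify numerically; a small self-contained check (nothing assumed beyond NumPy and `scipy.fft`)::

    import numpy as np
    from scipy.fft import fft, ifft

    rng = np.random.default_rng(0)
    x = rng.standard_normal(16)
    n = len(x)

    # Naive O(n^2) DFT: y[k] = sum(x * exp(-2j * pi * k * arange(n) / n)).
    naive = np.array([np.sum(x * np.exp(-2j * np.pi * k * np.arange(n) / n))
                      for k in range(n)])
    assert np.allclose(fft(x), naive)

    # Round trip: ifft(fft(x)) == x to within numerical accuracy.
    assert np.allclose(ifft(fft(x)), x)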
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> scipy.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*rng.uniform(0, 2*np.pi, (20,))) + >>> s = scipy.fft.ifft(n) + >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') + [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] + >>> plt.legend(('real', 'imaginary')) + <matplotlib.legend.Legend object at 0x...> + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 1-D discrete Fourier Transform for real input. + + This function computes the 1-D *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + x : array_like + Input array + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + irfft : The inverse of `rfft`. + fft : The 1-D FFT of general (complex) input. + fftn : The N-D FFT. + rfft2 : The 2-D FFT of real input. + rfftn : The N-D FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e., the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``X = rfft(x)`` and fs is the sampling frequency, ``X[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case.
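A quick numerical illustration of the half-spectrum behaviour just described (only NumPy and `scipy.fft` assumed)::

    import numpy as np
    from scipy.fft import fft, rfft

    x = np.arange(8.0)
    full = fft(x)
    half = rfft(x)
    assert half.shape == (len(x) // 2 + 1,)            # n//2 + 1 terms kept
    assert np.allclose(half, full[:len(x) // 2 + 1])   # the non-negative freqs
    assert np.allclose(full[5:], np.conj(full[1:4])[::-1])  # Hermitian symmetry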
+ + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> import scipy.fft + >>> scipy.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> scipy.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the 1-D *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(x), len(x)) == x`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e., the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + x : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)``, where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `x`. + + See Also + -------- + rfft : The 1-D FFT of real input, of which `irfft` is inverse. + fft : The 1-D FFT. + irfft2 : The inverse of the 2-D FFT of real input. + irfftn : The inverse of the N-D FFT of real input. + + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `x`, where `x` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. 
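Concretely, because the default output length is ``2*(m-1)``, round-tripping an odd-length signal requires passing `n` explicitly; a short sketch::

    import numpy as np
    from scipy.fft import rfft, irfft

    x = np.arange(5.0)                    # odd length 5
    m = rfft(x)                           # 5//2 + 1 == 3 complex terms
    assert irfft(m).shape == (4,)         # default n = 2*(3-1) assumes even
    assert np.allclose(irfft(m, n=5), x)  # passing n recovers x to rounding error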
+ + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The default value of `n` assumes an even output length. By the Hermitian + symmetry, the last imaginary component must be 0 and so is ignored. To + avoid losing information, the correct length of the real input *must* be + given. + + Examples + -------- + >>> import scipy.fft + >>> scipy.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> scipy.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + x : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``, + where ``m`` is the length of the input along the axis specified by + `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See `fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*m - 2``, where ``m`` is the length of the transformed axis of + the input. To get an odd number of output points, `n` must be + specified, for instance, as ``2*m - 1`` in the typical case. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + rfft : Compute the 1-D FFT for real input. + ihfft : The inverse of `hfft`. + hfftn : Compute the N-D FFT of a Hermitian signal. + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So, here, it's `hfft`, for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
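These even/odd identities can be checked directly with a real half-signal; a minimal sketch::

    import numpy as np
    from scipy.fft import hfft, ihfft

    a = np.array([1.0, 2.0, 3.0, 4.0])
    even = ihfft(hfft(a, 2 * len(a) - 2))  # even-length spectrum, 6 points
    odd = ihfft(hfft(a, 2 * len(a) - 1))   # odd-length spectrum, 7 points
    assert np.allclose(even, a) and np.allclose(odd, a)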
+ + Examples + -------- + >>> from scipy.fft import fft, hfft + >>> import numpy as np + >>> a = 2 * np.pi * np.arange(10) / 10 + >>> signal = np.cos(a) + 3j * np.sin(3 * a) + >>> fft(signal).round(10) + array([ -0.+0.j, 5.+0.j, -0.+0.j, 15.-0.j, 0.+0.j, 0.+0.j, + -0.+0.j, -15.-0.j, 0.+0.j, 5.+0.j]) + >>> hfft(signal[:6]).round(10) # Input first half of signal + array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.]) + >>> hfft(signal, 10) # Input entire signal and truncate + array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.]) + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the inverse FFT of a signal that has Hermitian symmetry. + + Parameters + ---------- + x : array_like + Input array. + n : int, optional + Length of the inverse FFT, the number of points along + transformation axis in the input to use. If `n` is smaller than + the length of the input, the input is cropped. If it is larger, + the input is padded with zeros. If `n` is not given, the length of + the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See `fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is ``n//2 + 1``. + + See Also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here, the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So, here, it's `hfft`, for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + Examples + -------- + >>> from scipy.fft import ifft, ihfft + >>> import numpy as np + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D discrete Fourier Transform. + + This function computes the N-D discrete Fourier Transform over + any number of axes in an M-D array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + x : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `x`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + ifftn : The inverse of `fftn`, the inverse N-D FFT. + fft : The 1-D FFT, with definitions and conventions used. + rfftn : The N-D FFT of real input. + fft2 : The 2-D FFT. + fftshift : Shifts zero-frequency terms to centre of array. + + Notes + ----- + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of all axes, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.mgrid[:3, :3, :3][0] + >>> scipy.fft.fftn(x, axes=(1, 2)) + array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 9.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[18.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1)) + array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[-2.+0.j, -2.+0.j, -2.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, + ... 2 * np.pi * np.arange(200) / 34) + >>> S = np.sin(X) + np.cos(Y) + rng.uniform(0, 1, X.shape) + >>> FS = scipy.fft.fftn(S) + >>> plt.imshow(np.log(np.abs(scipy.fft.fftshift(FS))**2)) + <matplotlib.image.AxesImage object at 0x...> + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D inverse discrete Fourier Transform. + + This function computes the inverse of the N-D discrete + Fourier Transform over any number of axes in an M-D array by + means of the Fast Fourier Transform (FFT). In other words, + ``ifftn(fftn(x)) == x`` to within numerical accuracy.
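As in the 1-D case, the N-D round trip is easy to confirm::

    import numpy as np
    from scipy.fft import fftn, ifftn

    x = np.random.default_rng(1).standard_normal((2, 3, 4))
    assert np.allclose(ifftn(fftn(x)), x)  # transforms over all axes by default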
+ + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fftn`, i.e., it should have the term for zero frequency + in all axes in the low-order corner, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + Parameters + ---------- + x : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``ifft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + axes : sequence of ints, optional + Axes over which to compute the IFFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `x`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + fftn : The forward N-D FFT, of which `ifftn` is the inverse. + ifft : The 1-D inverse FFT. + ifft2 : The 2-D inverse FFT. + ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning + of array. + + Notes + ----- + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifftn` is called. 
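+
+    For instance, passing a larger `s` is equivalent to transforming an
+    input that was explicitly padded with trailing zeros (an illustrative
+    sketch; the specific array is an assumption, not from the original
+    text):
+
+    >>> import numpy as np
+    >>> import scipy.fft
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> padded = np.concatenate([x, np.zeros(4)])
+    >>> np.allclose(scipy.fft.ifftn(x, s=(8,)), scipy.fft.ifftn(padded))
+    True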
+ + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.eye(4) + >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,)) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) + + + Create and plot an image with band-limited frequency content: + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> n = np.zeros((200,200), dtype=complex) + >>> n[60:80, 20:40] = np.exp(1j*rng.uniform(0, 2*np.pi, (20, 20))) + >>> im = scipy.fft.ifftn(n).real + >>> plt.imshow(im) + + >>> plt.show() + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D discrete Fourier Transform + + This function computes the N-D discrete Fourier Transform + over any axes in an M-D array by means of the + Fast Fourier Transform (FFT). By default, the transform is computed over + the last two axes of the input array, i.e., a 2-dimensional FFT. + + Parameters + ---------- + x : array_like + Input array, can be complex + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two axes are + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + ifft2 : The inverse 2-D FFT. + fft : The 1-D FFT. + fftn : The N-D FFT. + fftshift : Shifts zero-frequency terms to the center of the array. + For 2-D input, swaps first and third quadrants, and second + and fourth quadrants. + + Notes + ----- + `fft2` is just `fftn` with a different default for `axes`. + + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of the transformed axes, the positive frequency terms + in the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + the axes, in order of decreasingly negative frequency. + + See `fftn` for details and a plotting example, and `fft` for + definitions and conventions used. 
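+
+    The equivalence with `fftn` can be checked directly (an illustrative
+    doctest; the seeded random input is an assumption, not part of the
+    original documentation):
+
+    >>> import numpy as np
+    >>> import scipy.fft
+    >>> x = np.random.default_rng(1).standard_normal((3, 4))
+    >>> np.allclose(scipy.fft.fft2(x), scipy.fft.fftn(x, axes=(-2, -1)))
+    True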
+ + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.mgrid[:5, :5][0] + >>> scipy.fft.fft2(x) + array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary + 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D inverse discrete Fourier Transform. + + This function computes the inverse of the 2-D discrete Fourier + Transform over any number of axes in an M-D array by means of + the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(x)) == x`` + to within numerical accuracy. By default, the inverse transform is + computed over the last two axes of the input array. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fft2`, i.e., it should have the term for zero frequency + in the low-order corner of the two axes, the positive frequency terms in + the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + both axes, in order of decreasingly negative frequency. + + Parameters + ---------- + x : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each axis) of the output (``s[0]`` refers to axis 0, + ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + fft2 : The forward 2-D FFT, of which `ifft2` is the inverse. + ifftn : The inverse of the N-D FFT. + fft : The 1-D FFT. + ifft : The 1-D inverse FFT. + + Notes + ----- + `ifft2` is just `ifftn` with a different default for `axes`. + + See `ifftn` for details and a plotting example, and `fft` for + definition and conventions used. 
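+
+    The round trip stated above can be verified directly (an illustrative
+    doctest; the seeded random input is an assumption, not part of the
+    original documentation):
+
+    >>> import numpy as np
+    >>> import scipy.fft
+    >>> x = np.random.default_rng(3).standard_normal((3, 3))
+    >>> np.allclose(scipy.fft.ifft2(scipy.fft.fft2(x)), x)
+    True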
+ + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifft2` is called. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = 4 * np.eye(4) + >>> scipy.fft.ifft2(x) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D discrete Fourier Transform for real input. + + This function computes the N-D discrete Fourier Transform over + any number of axes in an M-D real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining + transforms are complex. + + Parameters + ---------- + x : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + The final element of `s` corresponds to `n` for ``rfft(x, n)``, while + for the remaining axes, it corresponds to `n` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `x`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT + of real input. + fft : The 1-D FFT, with definitions and conventions used. + rfft : The 1-D FFT of real input. + fftn : The N-D FFT. + rfft2 : The 2-D FFT of real input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `rfft`, then the transform over the remaining axes is + performed as by `fftn`. 
The order of the output is as for `rfft` for the + final transformation axis, and as for `fftn` for the remaining + transformation axes. + + See `fft` for details, definitions and conventions used. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.ones((2, 2, 2)) + >>> scipy.fft.rfftn(x) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + >>> scipy.fft.rfftn(x, axes=(2, 0)) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D FFT of a real array. + + Parameters + ---------- + x : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + axes : sequence of ints, optional + Axes over which to compute the FFT. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + irfft2 : The inverse of the 2-D FFT of real input. + rfft : The 1-D FFT of real input. + rfftn : Compute the N-D discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Computes the inverse of `rfftn` + + This function computes the inverse of the N-D discrete + Fourier Transform for real input over any number of axes in an + M-D array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(x), x.shape) == x`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e., as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. + + Parameters + ---------- + x : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to be + ``2*(m-1)``, where ``m`` is the length of the input along that axis. + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. 
If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `x`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)``, where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + rfftn : The forward N-D FFT of real input, + of which `ifftn` is the inverse. + fft : The 1-D FFT, with definitions and conventions used. + irfft : The inverse of the 1-D FFT of real input. + irfft2 : The inverse of the 2-D FFT of real input. + + Notes + ----- + See `fft` for definitions and conventions used. + + See `rfft` for definitions and conventions used for real input. + + The default value of `s` assumes an even output length in the final + transformation axis. When performing the final complex to real + transformation, the Hermitian symmetry requires that the last imaginary + component along that axis must be 0 and so it is ignored. To avoid losing + information, the correct length of the real input *must* be given. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.zeros((3, 2, 2)) + >>> x[0, 0, 0] = 3 * 2 * 2 + >>> scipy.fft.irfftn(x) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Computes the inverse of `rfft2` + + Parameters + ---------- + x : array_like + The input array + s : sequence of ints, optional + Shape of the real output to the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. 
It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + rfft2 : The 2-D FFT of real input. + irfft : The inverse of the 1-D FFT of real input. + irfftn : The inverse of the N-D FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D FFT of Hermitian symmetric complex input, i.e., a + signal with a real spectrum. + + This function computes the N-D discrete Fourier Transform for a + Hermitian symmetric complex input over any number of axes in an + M-D array by means of the Fast Fourier Transform (FFT). In other + words, ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy. (``s`` + here is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1``, this is necessary + for the same reason ``x.shape`` would be necessary for `irfft`.) + + Parameters + ---------- + x : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along that axis. + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `x`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`. 
+ fft : The 1-D FFT, with definitions and conventions used. + rfft : Forward FFT of real input. + + Notes + ----- + For a 1-D signal ``x`` to have a real spectrum, it must satisfy + the Hermitian property:: + + x[i] == np.conj(x[-i]) for all i + + This generalizes into higher dimensions by reflecting over each axis in + turn:: + + x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...]) for all i, j, k, ... + + This should not be confused with a Hermitian matrix, for which the + transpose is its own conjugate:: + + x[i, j] == np.conj(x[j, i]) for all i, j + + + The default value of `s` assumes an even output length in the final + transformation axis. When performing the final complex to real + transformation, the Hermitian symmetry requires that the last imaginary + component along that axis must be 0 and so it is ignored. To avoid losing + information, the correct length of the real input *must* be given. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.ones((3, 2, 2)) + >>> scipy.fft.hfftn(x) + array([[[12., 0.], + [ 0., 0.]], + [[ 0., 0.], + [ 0., 0.]], + [[ 0., 0.], + [ 0., 0.]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D FFT of a Hermitian complex array. + + Parameters + ---------- + x : array + Input array, taken to be Hermitian complex. + s : sequence of ints, optional + Shape of the real output. + axes : sequence of ints, optional + Axes over which to compute the FFT. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See `fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The real result of the 2-D Hermitian complex real FFT. + + See Also + -------- + hfftn : Compute the N-D discrete Fourier Transform for Hermitian + complex input. + + Notes + ----- + This is really just `hfftn` with different default behavior. + For more details see `hfftn`. + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the N-D inverse discrete Fourier Transform for a real + spectrum. + + This function computes the N-D inverse discrete Fourier Transform + over any number of axes in an M-D real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining transforms + are complex. + + Parameters + ---------- + x : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. 
+ axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `x`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `x`. + + See Also + -------- + hfftn : The forward N-D FFT of Hermitian input. + hfft : The 1-D FFT of Hermitian input. + fft : The 1-D FFT, with definitions and conventions used. + fftn : The N-D FFT. + hfft2 : The 2-D FFT of Hermitian input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `ihfft`, then the transform over the remaining axes is + performed as by `ifftn`. The order of the output is the positive part of + the Hermitian output signal, in the same format as `rfft`. + + Examples + -------- + >>> import scipy.fft + >>> import numpy as np + >>> x = np.ones((2, 2, 2)) + >>> scipy.fft.ihfftn(x) + array([[[1.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + >>> scipy.fft.ihfftn(x, axes=(2, 0)) + array([[[1.+0.j, 0.+0.j], # may vary + [1.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, + plan=None): + """ + Compute the 2-D inverse FFT of a real spectrum. + + Parameters + ---------- + x : array_like + The input array + s : sequence of ints, optional + Shape of the real input to the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `fft`). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + See :func:`fft` for more details. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + plan : object, optional + This argument is reserved for passing in a precomputed plan provided + by downstream FFT vendors. It is currently not used in SciPy. + + .. versionadded:: 1.5.0 + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + ihfftn : Compute the inverse of the N-D FFT of Hermitian input. 
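+
+    A round trip through `ihfft2` and `hfft2` should recover a real input
+    when the original shape is passed back (an illustrative sketch under
+    the stated Hermitian conventions; the seeded random input is an
+    assumption, not part of the original documentation):
+
+    >>> import numpy as np
+    >>> import scipy.fft
+    >>> x = np.random.default_rng(2).standard_normal((4, 4))
+    >>> np.allclose(scipy.fft.hfft2(scipy.fft.ihfft2(x), s=x.shape), x)
+    True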
+ + Notes + ----- + This is really `ihfftn` with different defaults. + For more details see `ihfftn`. + + """ + return (Dispatchable(x, np.ndarray),) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_basic_backend.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_basic_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..b21efa56bc5bdeba9b2f96542ace027373e76c9b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_basic_backend.py @@ -0,0 +1,180 @@ +from scipy._lib._array_api import ( + array_namespace, is_numpy, xp_unsupported_param_msg, is_complex +) +from . import _pocketfft +import numpy as np + + +def _validate_fft_args(workers, plan, norm): + if workers is not None: + raise ValueError(xp_unsupported_param_msg("workers")) + if plan is not None: + raise ValueError(xp_unsupported_param_msg("plan")) + if norm is None: + norm = 'backward' + return norm + + +# pocketfft is used whenever SCIPY_ARRAY_API is not set, +# or x is a NumPy array or array-like. +# When SCIPY_ARRAY_API is set, we try to use xp.fft for CuPy arrays, +# PyTorch arrays and other array API standard supporting objects. +# If xp.fft does not exist, we attempt to convert to np and back to use pocketfft. + +def _execute_1D(func_str, pocketfft_func, x, n, axis, norm, overwrite_x, workers, plan): + xp = array_namespace(x) + + if is_numpy(xp): + x = np.asarray(x) + return pocketfft_func(x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + norm = _validate_fft_args(workers, plan, norm) + if hasattr(xp, 'fft'): + xp_func = getattr(xp.fft, func_str) + return xp_func(x, n=n, axis=axis, norm=norm) + + x = np.asarray(x) + y = pocketfft_func(x, n=n, axis=axis, norm=norm) + return xp.asarray(y) + + +def _execute_nD(func_str, pocketfft_func, x, s, axes, norm, overwrite_x, workers, plan): + xp = array_namespace(x) + + if is_numpy(xp): + x = np.asarray(x) + return pocketfft_func(x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + norm = _validate_fft_args(workers, plan, norm) + if hasattr(xp, 'fft'): + xp_func = getattr(xp.fft, func_str) + return xp_func(x, s=s, axes=axes, norm=norm) + + x = np.asarray(x) + y = pocketfft_func(x, s=s, axes=axes, norm=norm) + return xp.asarray(y) + + +def fft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('fft', _pocketfft.fft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, + plan=None): + return _execute_1D('ifft', _pocketfft.ifft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def rfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('rfft', _pocketfft.rfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def irfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('irfft', _pocketfft.irfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def hfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('hfft', _pocketfft.hfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def 
ihfft(x, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_1D('ihfft', _pocketfft.ihfft, x, n=n, axis=axis, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def fftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('fftn', _pocketfft.fftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + + +def ifftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('ifftn', _pocketfft.ifftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def fft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return fftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def ifft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return ifftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def rfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('rfftn', _pocketfft.rfftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def rfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return rfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def irfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + return _execute_nD('irfftn', _pocketfft.irfftn, x, s=s, axes=axes, norm=norm, + overwrite_x=overwrite_x, workers=workers, plan=plan) + + +def irfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return irfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def _swap_direction(norm): + if norm in (None, 'backward'): + norm = 'forward' + elif norm == 'forward': + norm = 'backward' + elif norm != 'ortho': + raise ValueError('Invalid norm value %s; should be "backward", ' + '"ortho", or "forward".' 
% norm) + return norm + + +def hfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + xp = array_namespace(x) + if is_numpy(xp): + x = np.asarray(x) + return _pocketfft.hfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + if is_complex(x, xp): + x = xp.conj(x) + return irfftn(x, s, axes, _swap_direction(norm), + overwrite_x, workers, plan=plan) + + +def hfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return hfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + + +def ihfftn(x, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, plan=None): + xp = array_namespace(x) + if is_numpy(xp): + x = np.asarray(x) + return _pocketfft.ihfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) + return xp.conj(rfftn(x, s, axes, _swap_direction(norm), + overwrite_x, workers, plan=plan)) + +def ihfft2(x, s=None, axes=(-2, -1), norm=None, + overwrite_x=False, workers=None, *, plan=None): + return ihfftn(x, s, axes, norm, overwrite_x, workers, plan=plan) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_debug_backends.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_debug_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..c9647c5d6ceddc73b97d95f562662ada02c1ae74 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_debug_backends.py @@ -0,0 +1,22 @@ +import numpy as np + +class NumPyBackend: + """Backend that uses numpy.fft""" + __ua_domain__ = "numpy.scipy.fft" + + @staticmethod + def __ua_function__(method, args, kwargs): + kwargs.pop("overwrite_x", None) + + fn = getattr(np.fft, method.__name__, None) + return (NotImplemented if fn is None + else fn(*args, **kwargs)) + + +class EchoBackend: + """Backend that just prints the __ua_function__ arguments""" + __ua_domain__ = "numpy.scipy.fft" + + @staticmethod + def __ua_function__(method, args, kwargs): + print(method, args, kwargs, sep='\n') diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_fftlog.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_fftlog.py new file mode 100644 index 0000000000000000000000000000000000000000..8960242989c7c1d062af4fe1960c2384abaab94f --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_fftlog.py @@ -0,0 +1,223 @@ +"""Fast Hankel transforms using the FFTLog algorithm. + +The implementation closely follows the Fortran code of Hamilton (2000). + +added: 14/11/2020 Nicolas Tessore +""" + +from ._basic import _dispatch +from scipy._lib.uarray import Dispatchable +from ._fftlog_backend import fhtoffset +import numpy as np + +__all__ = ['fht', 'ifht', 'fhtoffset'] + + +@_dispatch +def fht(a, dln, mu, offset=0.0, bias=0.0): + r'''Compute the fast Hankel transform. + + Computes the discrete Hankel transform of a logarithmically spaced periodic + sequence using the FFTLog algorithm [1]_, [2]_. + + Parameters + ---------- + a : array_like (..., n) + Real periodic input array, uniformly logarithmically spaced. For + multidimensional input, the transform is performed over the last axis. + dln : float + Uniform logarithmic spacing of the input array. + mu : float + Order of the Hankel transform, any positive or negative real number. 
+ offset : float, optional + Offset of the uniform logarithmic spacing of the output array. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + A : array_like (..., n) + The transformed output array, which is real, periodic, uniformly + logarithmically spaced, and of the same shape as the input array. + + See Also + -------- + ifht : The inverse of `fht`. + fhtoffset : Return an optimal offset for `fht`. + + Notes + ----- + This function computes a discrete version of the Hankel transform + + .. math:: + + A(k) = \int_{0}^{\infty} \! a(r) \, J_\mu(kr) \, k \, dr \;, + + where :math:`J_\mu` is the Bessel function of order :math:`\mu`. The index + :math:`\mu` may be any real number, positive or negative. Note that the + numerical Hankel transform uses an integrand of :math:`k \, dr`, while the + mathematical Hankel transform is commonly defined using :math:`r \, dr`. + + The input array `a` is a periodic sequence of length :math:`n`, uniformly + logarithmically spaced with spacing `dln`, + + .. math:: + + a_j = a(r_j) \;, \quad + r_j = r_c \exp[(j-j_c) \, \mathtt{dln}] + + centred about the point :math:`r_c`. Note that the central index + :math:`j_c = (n-1)/2` is half-integral if :math:`n` is even, so that + :math:`r_c` falls between two input elements. Similarly, the output + array `A` is a periodic sequence of length :math:`n`, also uniformly + logarithmically spaced with spacing `dln` + + .. math:: + + A_j = A(k_j) \;, \quad + k_j = k_c \exp[(j-j_c) \, \mathtt{dln}] + + centred about the point :math:`k_c`. + + The centre points :math:`r_c` and :math:`k_c` of the periodic intervals may + be chosen arbitrarily, but it would be usual to choose the product + :math:`k_c r_c = k_j r_{n-1-j} = k_{n-1-j} r_j` to be unity. This can be + changed using the `offset` parameter, which controls the logarithmic offset + :math:`\log(k_c) = \mathtt{offset} - \log(r_c)` of the output array. + Choosing an optimal value for `offset` may reduce ringing of the discrete + Hankel transform. + + If the `bias` parameter is nonzero, this function computes a discrete + version of the biased Hankel transform + + .. math:: + + A(k) = \int_{0}^{\infty} \! a_q(r) \, (kr)^q \, J_\mu(kr) \, k \, dr + + where :math:`q` is the value of `bias`, and a power law bias + :math:`a_q(r) = a(r) \, (kr)^{-q}` is applied to the input sequence. + Biasing the transform can help approximate the continuous transform of + :math:`a(r)` if there is a value :math:`q` such that :math:`a_q(r)` is + close to a periodic sequence, in which case the resulting :math:`A(k)` will + be close to the continuous transform. + + References + ---------- + .. [1] Talman J. D., 1978, J. Comp. Phys., 29, 35 + .. [2] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191) + + Examples + -------- + + This example is the adapted version of ``fftlogtest.f`` which is provided + in [2]_. It evaluates the integral + + .. math:: + + \int^\infty_0 r^{\mu+1} \exp(-r^2/2) J_\mu(k, r) k dr + = k^{\mu+1} \exp(-k^2/2) . + + >>> import numpy as np + >>> from scipy import fft + >>> import matplotlib.pyplot as plt + + Parameters for the transform. + + >>> mu = 0.0 # Order mu of Bessel function + >>> r = np.logspace(-7, 1, 128) # Input evaluation points + >>> dln = np.log(r[1]/r[0]) # Step size + >>> offset = fft.fhtoffset(dln, initial=-6*np.log(10), mu=mu) + >>> k = np.exp(offset)/r[::-1] # Output evaluation points + + Define the analytical function. + + >>> def f(x, mu): + ... 
"""Analytical function: x^(mu+1) exp(-x^2/2).""" + ... return x**(mu + 1)*np.exp(-x**2/2) + + Evaluate the function at ``r`` and compute the corresponding values at + ``k`` using FFTLog. + + >>> a_r = f(r, mu) + >>> fht = fft.fht(a_r, dln, mu=mu, offset=offset) + + For this example we can actually compute the analytical response (which in + this case is the same as the input function) for comparison and compute the + relative error. + + >>> a_k = f(k, mu) + >>> rel_err = abs((fht-a_k)/a_k) + + Plot the result. + + >>> figargs = {'sharex': True, 'sharey': True, 'constrained_layout': True} + >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), **figargs) + >>> ax1.set_title(r'$r^{\mu+1}\ \exp(-r^2/2)$') + >>> ax1.loglog(r, a_r, 'k', lw=2) + >>> ax1.set_xlabel('r') + >>> ax2.set_title(r'$k^{\mu+1} \exp(-k^2/2)$') + >>> ax2.loglog(k, a_k, 'k', lw=2, label='Analytical') + >>> ax2.loglog(k, fht, 'C3--', lw=2, label='FFTLog') + >>> ax2.set_xlabel('k') + >>> ax2.legend(loc=3, framealpha=1) + >>> ax2.set_ylim([1e-10, 1e1]) + >>> ax2b = ax2.twinx() + >>> ax2b.loglog(k, rel_err, 'C0', label='Rel. Error (-)') + >>> ax2b.set_ylabel('Rel. Error (-)', color='C0') + >>> ax2b.tick_params(axis='y', labelcolor='C0') + >>> ax2b.legend(loc=4, framealpha=1) + >>> ax2b.set_ylim([1e-9, 1e-3]) + >>> plt.show() + + ''' + return (Dispatchable(a, np.ndarray),) + + +@_dispatch +def ifht(A, dln, mu, offset=0.0, bias=0.0): + r"""Compute the inverse fast Hankel transform. + + Computes the discrete inverse Hankel transform of a logarithmically spaced + periodic sequence. This is the inverse operation to `fht`. + + Parameters + ---------- + A : array_like (..., n) + Real periodic input array, uniformly logarithmically spaced. For + multidimensional input, the transform is performed over the last axis. + dln : float + Uniform logarithmic spacing of the input array. + mu : float + Order of the Hankel transform, any positive or negative real number. + offset : float, optional + Offset of the uniform logarithmic spacing of the output array. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + a : array_like (..., n) + The transformed output array, which is real, periodic, uniformly + logarithmically spaced, and of the same shape as the input array. + + See Also + -------- + fht : Definition of the fast Hankel transform. + fhtoffset : Return an optimal offset for `ifht`. + + Notes + ----- + This function computes a discrete version of the Hankel transform + + .. math:: + + a(r) = \int_{0}^{\infty} \! A(k) \, J_\mu(kr) \, r \, dk \;, + + where :math:`J_\mu` is the Bessel function of order :math:`\mu`. The index + :math:`\mu` may be any real number, positive or negative. Note that the + numerical inverse Hankel transform uses an integrand of :math:`r \, dk`, while the + mathematical inverse Hankel transform is commonly defined using :math:`k \, dk`. + + See `fht` for further details. 
+ """ + return (Dispatchable(A, np.ndarray),) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..616752104d942edef93b8ed41bb8b302ed686ca3 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py @@ -0,0 +1,199 @@ +import numpy as np +from warnings import warn +from ._basic import rfft, irfft +from ..special import loggamma, poch + +from scipy._lib._array_api import array_namespace, copy + +__all__ = ['fht', 'ifht', 'fhtoffset'] + +# constants +LN_2 = np.log(2) + + +def fht(a, dln, mu, offset=0.0, bias=0.0): + xp = array_namespace(a) + a = xp.asarray(a) + + # size of transform + n = a.shape[-1] + + # bias input array + if bias != 0: + # a_q(r) = a(r) (r/r_c)^{-q} + j_c = (n-1)/2 + j = xp.arange(n, dtype=xp.float64) + a = a * xp.exp(-bias*(j - j_c)*dln) + + # compute FHT coefficients + u = xp.asarray(fhtcoeff(n, dln, mu, offset=offset, bias=bias)) + + # transform + A = _fhtq(a, u, xp=xp) + + # bias output array + if bias != 0: + # A(k) = A_q(k) (k/k_c)^{-q} (k_c r_c)^{-q} + A *= xp.exp(-bias*((j - j_c)*dln + offset)) + + return A + + +def ifht(A, dln, mu, offset=0.0, bias=0.0): + xp = array_namespace(A) + A = xp.asarray(A) + + # size of transform + n = A.shape[-1] + + # bias input array + if bias != 0: + # A_q(k) = A(k) (k/k_c)^{q} (k_c r_c)^{q} + j_c = (n-1)/2 + j = xp.arange(n, dtype=xp.float64) + A = A * xp.exp(bias*((j - j_c)*dln + offset)) + + # compute FHT coefficients + u = xp.asarray(fhtcoeff(n, dln, mu, offset=offset, bias=bias, inverse=True)) + + # transform + a = _fhtq(A, u, inverse=True, xp=xp) + + # bias output array + if bias != 0: + # a(r) = a_q(r) (r/r_c)^{q} + a /= xp.exp(-bias*(j - j_c)*dln) + + return a + + +def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0, inverse=False): + """Compute the coefficient array for a fast Hankel transform.""" + lnkr, q = offset, bias + + # Hankel transform coefficients + # u_m = (kr)^{-i 2m pi/(n dlnr)} U_mu(q + i 2m pi/(n dlnr)) + # with U_mu(x) = 2^x Gamma((mu+1+x)/2)/Gamma((mu+1-x)/2) + xp = (mu+1+q)/2 + xm = (mu+1-q)/2 + y = np.linspace(0, np.pi*(n//2)/(n*dln), n//2+1) + u = np.empty(n//2+1, dtype=complex) + v = np.empty(n//2+1, dtype=complex) + u.imag[:] = y + u.real[:] = xm + loggamma(u, out=v) + u.real[:] = xp + loggamma(u, out=u) + y *= 2*(LN_2 - lnkr) + u.real -= v.real + u.real += LN_2*q + u.imag += v.imag + u.imag += y + np.exp(u, out=u) + + # fix last coefficient to be real + u.imag[-1] = 0 + + # deal with special cases + if not np.isfinite(u[0]): + # write u_0 = 2^q Gamma(xp)/Gamma(xm) = 2^q poch(xm, xp-xm) + # poch() handles special cases for negative integers correctly + u[0] = 2**q * poch(xm, xp-xm) + # the coefficient may be inf or 0, meaning the transform or the + # inverse transform, respectively, is singular + + # check for singular transform or singular inverse transform + if np.isinf(u[0]) and not inverse: + warn('singular transform; consider changing the bias', stacklevel=3) + # fix coefficient to obtain (potentially correct) transform anyway + u = copy(u) + u[0] = 0 + elif u[0] == 0 and inverse: + warn('singular inverse transform; consider changing the bias', stacklevel=3) + # fix coefficient to obtain (potentially correct) inverse anyway + u = copy(u) + u[0] = np.inf + + return u + + +def fhtoffset(dln, mu, 
initial=0.0, bias=0.0): + """Return optimal offset for a fast Hankel transform. + + Returns an offset close to `initial` that fulfils the low-ringing + condition of [1]_ for the fast Hankel transform `fht` with logarithmic + spacing `dln`, order `mu` and bias `bias`. + + Parameters + ---------- + dln : float + Uniform logarithmic spacing of the transform. + mu : float + Order of the Hankel transform, any positive or negative real number. + initial : float, optional + Initial value for the offset. Returns the closest value that fulfils + the low-ringing condition. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + offset : float + Optimal offset of the uniform logarithmic spacing of the transform that + fulfils a low-ringing condition. + + Examples + -------- + >>> from scipy.fft import fhtoffset + >>> dln = 0.1 + >>> mu = 2.0 + >>> initial = 0.5 + >>> bias = 0.0 + >>> offset = fhtoffset(dln, mu, initial, bias) + >>> offset + 0.5454581477676637 + + See Also + -------- + fht : Definition of the fast Hankel transform. + + References + ---------- + .. [1] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191) + + """ + + lnkr, q = initial, bias + + xp = (mu+1+q)/2 + xm = (mu+1-q)/2 + y = np.pi/(2*dln) + zp = loggamma(xp + 1j*y) + zm = loggamma(xm + 1j*y) + arg = (LN_2 - lnkr)/dln + (zp.imag + zm.imag)/np.pi + return lnkr + (arg - np.round(arg))*dln + + +def _fhtq(a, u, inverse=False, *, xp=None): + """Compute the biased fast Hankel transform. + + This is the basic FFTLog routine. + """ + if xp is None: + xp = np + + # size of transform + n = a.shape[-1] + + # biased fast Hankel transform via real FFT + A = rfft(a, axis=-1) + if not inverse: + # forward transform + A *= u + else: + # backward transform + A /= xp.conj(u) + A = irfft(A, n, axis=-1) + A = xp.flip(A, axis=-1) + + return A diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_helper.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..76e08c4f61c854f9bfb9407a1951f2cf5a2af123 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_helper.py @@ -0,0 +1,379 @@ +from functools import update_wrapper, lru_cache +import inspect + +from ._pocketfft import helper as _helper + +import numpy as np +from scipy._lib._array_api import array_namespace + + +def next_fast_len(target, real=False): + """Find the next fast size of input data to ``fft``, for zero-padding, etc. + + SciPy's FFT algorithms gain their speed by a recursive divide and conquer + strategy. This relies on efficient functions for small prime factors of the + input length. Thus, the transforms are fastest when using composites of the + prime factors handled by the fft implementation. If there are efficient + functions for all radices <= `n`, then the result will be a number `x` + >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth + numbers) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + real : bool, optional + True if the FFT involves real input or output (e.g., `rfft` or `hfft` + but not `fft`). Defaults to False. + + Returns + ------- + out : int + The smallest fast length greater than or equal to ``target``. 
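+
+    For example (an illustrative doctest; the exact result assumes the
+    radix set of the current implementation):
+
+    >>> from scipy import fft
+    >>> fft.next_fast_len(1021)  # 1021 is prime
+    1024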
+ + Notes + ----- + The result of this function may change in future as performance + considerations change, for example, if new prime factors are added. + + Calling `fft` or `ifft` with real input data performs an ``'R2C'`` + transform internally. + + Examples + -------- + On a particular machine, an FFT of prime length takes 11.4 ms: + + >>> from scipy import fft + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> min_len = 93059 # prime length is worst case for speed + >>> a = rng.standard_normal(min_len) + >>> b = fft.fft(a) + + Zero-padding to the next regular length reduces computation time to + 1.6 ms, a speedup of 7.3 times: + + >>> fft.next_fast_len(min_len, real=True) + 93312 + >>> b = fft.fft(a, 93312) + + Rounding up to the next power of 2 is not optimal, taking 3.0 ms to + compute; 1.9 times longer than the size given by ``next_fast_len``: + + >>> b = fft.fft(a, 131072) + + """ + pass + + +# Directly wrap the c-function good_size but take the docstring etc., from the +# next_fast_len function above +_sig = inspect.signature(next_fast_len) +next_fast_len = update_wrapper(lru_cache(_helper.good_size), next_fast_len) +next_fast_len.__wrapped__ = _helper.good_size +next_fast_len.__signature__ = _sig + + +def prev_fast_len(target, real=False): + """Find the previous fast size of input data to ``fft``. + Useful for discarding a minimal number of samples before FFT. + + SciPy's FFT algorithms gain their speed by a recursive divide and conquer + strategy. This relies on efficient functions for small prime factors of the + input length. Thus, the transforms are fastest when using composites of the + prime factors handled by the fft implementation. If there are efficient + functions for all radices <= `n`, then the result will be a number `x` + <= ``target`` with only prime factors <= `n`. (Also known as `n`-smooth + numbers) + + Parameters + ---------- + target : int + Maximum length to search until. Must be a positive integer. + real : bool, optional + True if the FFT involves real input or output (e.g., `rfft` or `hfft` + but not `fft`). Defaults to False. + + Returns + ------- + out : int + The largest fast length less than or equal to ``target``. + + Notes + ----- + The result of this function may change in future as performance + considerations change, for example, if new prime factors are added. + + Calling `fft` or `ifft` with real input data performs an ``'R2C'`` + transform internally. + + In the current implementation, prev_fast_len assumes radices of + 2,3,5,7,11 for complex FFT and 2,3,5 for real FFT. 
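+
+    For example (an illustrative doctest; the exact result assumes the
+    radix sets noted above):
+
+    >>> from scipy import fft
+    >>> fft.prev_fast_len(1021, real=True)  # 1021 is prime
+    1000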
+ + Examples + -------- + On a particular machine, an FFT of prime length takes 16.2 ms: + + >>> from scipy import fft + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> max_len = 93059 # prime length is worst case for speed + >>> a = rng.standard_normal(max_len) + >>> b = fft.fft(a) + + Performing FFT on the maximum fast length less than max_len + reduces the computation time to 1.5 ms, a speedup of 10.5 times: + + >>> fft.prev_fast_len(max_len, real=True) + 92160 + >>> c = fft.fft(a[:92160]) # discard last 899 samples + + """ + pass + + +# Directly wrap the c-function prev_good_size but take the docstring etc., +# from the prev_fast_len function above +_sig_prev_fast_len = inspect.signature(prev_fast_len) +prev_fast_len = update_wrapper(lru_cache()(_helper.prev_good_size), prev_fast_len) +prev_fast_len.__wrapped__ = _helper.prev_good_size +prev_fast_len.__signature__ = _sig_prev_fast_len + + +def _init_nd_shape_and_axes(x, shape, axes): + """Handle shape and axes arguments for N-D transforms. + + Returns the shape and axes in a standard form, taking into account negative + values and checking for various potential errors. + + Parameters + ---------- + x : array_like + The input array. + shape : int or array_like of ints or None + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If `shape` is -1, the size of the corresponding dimension of `x` is + used. + axes : int or array_like of ints or None + Axes along which the calculation is computed. + The default is over all axes. + Negative indices are automatically converted to their positive + counterparts. + + Returns + ------- + shape : tuple + The shape of the result as a tuple of integers. + axes : list + Axes along which the calculation is computed, as a list of integers. + + """ + x = np.asarray(x) + return _helper._init_nd_shape_and_axes(x, shape, axes) + + +def fftfreq(n, d=1.0, *, xp=None, device=None): + """Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + xp : array_namespace, optional + The namespace for the return array. Default is None, where NumPy is used. + device : device, optional + The device for the return array. + Only valid when `xp.fft.fftfreq` implements the device parameter. + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> import scipy.fft + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = scipy.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = scipy.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. 
, 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + xp = np if xp is None else xp + # numpy does not yet support the `device` keyword + # `xp.__name__ != 'numpy'` should be removed when numpy is compatible + if hasattr(xp, 'fft') and xp.__name__ != 'numpy': + return xp.fft.fftfreq(n, d=d, device=device) + if device is not None: + raise ValueError('device parameter is not supported for input array type') + return np.fft.fftfreq(n, d=d) + + +def rfftfreq(n, d=1.0, *, xp=None, device=None): + """Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + xp : array_namespace, optional + The namespace for the return array. Default is None, where NumPy is used. + device : device, optional + The device for the return array. + Only valid when `xp.fft.rfftfreq` implements the device parameter. + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> import scipy.fft + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = scipy.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = scipy.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = scipy.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + xp = np if xp is None else xp + # numpy does not yet support the `device` keyword + # `xp.__name__ != 'numpy'` should be removed when numpy is compatible + if hasattr(xp, 'fft') and xp.__name__ != 'numpy': + return xp.fft.rfftfreq(n, d=d, device=device) + if device is not None: + raise ValueError('device parameter is not supported for input array type') + return np.fft.rfftfreq(n, d=d) + + +def fftshift(x, axes=None): + """Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). + Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. 
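+ +    Notes +    ----- +    Along each shifted axis of length ``n``, `fftshift` is equivalent to +    rolling the array by ``n // 2``; because ``n // 2 != n - n // 2`` for odd +    ``n``, the shift is not its own inverse in that case, and `ifftshift` +    (which rolls by ``-(n // 2)``) is needed to undo it exactly: + +    >>> import numpy as np +    >>> x = np.arange(5) +    >>> np.array_equal(np.fft.fftshift(x), np.roll(x, 5 // 2)) +    True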
+ + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + xp = array_namespace(x) + if hasattr(xp, 'fft'): + return xp.fft.fftshift(x, axes=axes) + x = np.asarray(x) + y = np.fft.fftshift(x, axes=axes) + return xp.asarray(y) + + +def ifftshift(x, axes=None): + """The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + xp = array_namespace(x) + if hasattr(xp, 'fft'): + return xp.fft.ifftshift(x, axes=axes) + x = np.asarray(x) + y = np.fft.ifftshift(x, axes=axes) + return xp.asarray(y) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/LICENSE.md b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..1b5163d8435976c24988afbd39ded304947178cb --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/LICENSE.md @@ -0,0 +1,25 @@ +Copyright (C) 2010-2019 Max-Planck-Society +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of the copyright holder nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0671484c9a0780df353b9b783813b6fa7492d38d --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__init__.py @@ -0,0 +1,9 @@ +""" FFT backend using pypocketfft """ + +from .basic import * +from .realtransforms import * +from .helper import * + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b6101f8b4748663762dd9fbbc0a2713df566ff8 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b78d6cabb6c7922662e08365dd76add502c23625 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16a4049bacc16a3c1c198350f023420398307200 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ae7bd398a95a671432f73e6326e296cf0ef1fc6 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc differ diff --git 
a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/basic.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2d0d33958021c431171b72f72c37363ac98e03 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/basic.py @@ -0,0 +1,251 @@ +""" +Discrete Fourier Transforms - basic.py +""" +import numpy as np +import functools +from . import pypocketfft as pfft +from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied, + _fix_shape, _fix_shape_1d, _normalization, + _workers) + +def c2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ Return discrete Fourier transform of real or complex sequence. """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + overwrite_x = overwrite_x or _datacopied(tmp, x) + norm = _normalization(norm, forward) + workers = _workers(workers) + + if n is not None: + tmp, copied = _fix_shape_1d(tmp, n, axis) + overwrite_x = overwrite_x or copied + elif tmp.shape[axis] < 1: + message = f"invalid number of data points ({tmp.shape[axis]}) specified" + raise ValueError(message) + + out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None) + + return pfft.c2c(tmp, (axis,), forward, norm, out, workers) + + +fft = functools.partial(c2c, True) +fft.__name__ = 'fft' +ifft = functools.partial(c2c, False) +ifft.__name__ = 'ifft' + + +def r2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ + Discrete Fourier transform of a real sequence. + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + norm = _normalization(norm, forward) + workers = _workers(workers) + + if not np.isrealobj(tmp): + raise TypeError("x must be a real sequence") + + if n is not None: + tmp, _ = _fix_shape_1d(tmp, n, axis) + elif tmp.shape[axis] < 1: + raise ValueError(f"invalid number of data points ({tmp.shape[axis]}) specified") + + # Note: overwrite_x is not utilised + return pfft.r2c(tmp, (axis,), forward, norm, None, workers) + + +rfft = functools.partial(r2c, True) +rfft.__name__ = 'rfft' +ihfft = functools.partial(r2c, False) +ihfft.__name__ = 'ihfft' + + +def c2r(forward, x, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ + Return inverse discrete Fourier transform of real sequence x. + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + norm = _normalization(norm, forward) + workers = _workers(workers) + + # TODO: Optimize for hermitian and real? 
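+    # pfft.c2r consumes a complex, Hermitian-symmetric half-spectrum, so a +    # purely real input is promoted to complex just below; when n is not given, +    # the full output length is recovered from the input length m as (m - 1) * 2, +    # otherwise the input is trimmed or zero-padded to (n // 2) + 1 points.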
+ if np.isrealobj(tmp): + tmp = tmp + 0.j + + # Last axis utilizes hermitian symmetry + if n is None: + n = (tmp.shape[axis] - 1) * 2 + if n < 1: + raise ValueError(f"Invalid number of data points ({n}) specified") + else: + tmp, _ = _fix_shape_1d(tmp, (n//2) + 1, axis) + + # Note: overwrite_x is not utilized + return pfft.c2r(tmp, (axis,), n, forward, norm, None, workers) + + +hfft = functools.partial(c2r, True) +hfft.__name__ = 'hfft' +irfft = functools.partial(c2r, False) +irfft.__name__ = 'irfft' + + +def hfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None, + *, plan=None): + """ + 2-D discrete Fourier transform of a Hermitian sequence + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + return hfftn(x, s, axes, norm, overwrite_x, workers) + + +def ihfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None, + *, plan=None): + """ + 2-D discrete inverse Fourier transform of a Hermitian sequence + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + return ihfftn(x, s, axes, norm, overwrite_x, workers) + + +def c2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """ + Return multidimensional discrete Fourier transform. + """ + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + + shape, axes = _init_nd_shape_and_axes(tmp, s, axes) + overwrite_x = overwrite_x or _datacopied(tmp, x) + workers = _workers(workers) + + if len(axes) == 0: + return x + + tmp, copied = _fix_shape(tmp, shape, axes) + overwrite_x = overwrite_x or copied + + norm = _normalization(norm, forward) + out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None) + + return pfft.c2c(tmp, axes, forward, norm, out, workers) + + +fftn = functools.partial(c2cn, True) +fftn.__name__ = 'fftn' +ifftn = functools.partial(c2cn, False) +ifftn.__name__ = 'ifftn' + +def r2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """Return multidimensional discrete Fourier transform of real input""" + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + + if not np.isrealobj(tmp): + raise TypeError("x must be a real sequence") + + shape, axes = _init_nd_shape_and_axes(tmp, s, axes) + tmp, _ = _fix_shape(tmp, shape, axes) + norm = _normalization(norm, forward) + workers = _workers(workers) + + if len(axes) == 0: + raise ValueError("at least 1 axis must be transformed") + + # Note: overwrite_x is not utilized + return pfft.r2c(tmp, axes, forward, norm, None, workers) + + +rfftn = functools.partial(r2cn, True) +rfftn.__name__ = 'rfftn' +ihfftn = functools.partial(r2cn, False) +ihfftn.__name__ = 'ihfftn' + + +def c2rn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, plan=None): + """Multidimensional inverse discrete fourier transform with real output""" + if plan is not None: + raise NotImplementedError('Passing a precomputed plan is not yet ' + 'supported by scipy.fft functions') + tmp = _asfarray(x) + + # TODO: Optimize for hermitian and real? 
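+    # As in c2r above, real input is promoted to complex for pfft.c2r; only the +    # last axis in `axes` uses Hermitian symmetry, so its full output length +    # (lastsize) is saved before the stored extent is halved to (n // 2) + 1.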
+    if np.isrealobj(tmp): +        tmp = tmp + 0.j + +    noshape = s is None +    shape, axes = _init_nd_shape_and_axes(tmp, s, axes) + +    if len(axes) == 0: +        raise ValueError("at least 1 axis must be transformed") + +    shape = list(shape) +    if noshape: +        shape[-1] = (x.shape[axes[-1]] - 1) * 2 + +    norm = _normalization(norm, forward) +    workers = _workers(workers) + +    # Last axis utilizes hermitian symmetry +    lastsize = shape[-1] +    shape[-1] = (shape[-1] // 2) + 1 + +    tmp, _ = _fix_shape(tmp, shape, axes) + +    # Note: overwrite_x is not utilized +    return pfft.c2r(tmp, axes, lastsize, forward, norm, None, workers) + + +hfftn = functools.partial(c2rn, True) +hfftn.__name__ = 'hfftn' +irfftn = functools.partial(c2rn, False) +irfftn.__name__ = 'irfftn' + + +def r2r_fftpack(forward, x, n=None, axis=-1, norm=None, overwrite_x=False): +    """FFT of a real sequence, returning fftpack half complex format""" +    tmp = _asfarray(x) +    overwrite_x = overwrite_x or _datacopied(tmp, x) +    norm = _normalization(norm, forward) +    workers = _workers(None) + +    if tmp.dtype.kind == 'c': +        raise TypeError('x must be a real sequence') + +    if n is not None: +        tmp, copied = _fix_shape_1d(tmp, n, axis) +        overwrite_x = overwrite_x or copied +    elif tmp.shape[axis] < 1: +        raise ValueError(f"invalid number of data points ({tmp.shape[axis]}) specified") + +    out = (tmp if overwrite_x else None) + +    return pfft.r2r_fftpack(tmp, (axis,), forward, forward, norm, out, workers) + + +rfft_fftpack = functools.partial(r2r_fftpack, True) +rfft_fftpack.__name__ = 'rfft_fftpack' +irfft_fftpack = functools.partial(r2r_fftpack, False) +irfft_fftpack.__name__ = 'irfft_fftpack' diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/helper.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..ab2fbc553ccc46a4b337060a62702ec28cb8b254 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/helper.py @@ -0,0 +1,221 @@ +from numbers import Number +import operator +import os +import threading +import contextlib + +import numpy as np + +from scipy._lib._util import copy_if_needed + +# good_size is exposed (and used) from this import +from .pypocketfft import good_size, prev_good_size + + +__all__ = ['good_size', 'prev_good_size', 'set_workers', 'get_workers'] + +_config = threading.local() +_cpu_count = os.cpu_count() + + +def _iterable_of_int(x, name=None): +    """Convert ``x`` to an iterable sequence of int + +    Parameters +    ---------- +    x : value, or sequence of values, convertible to int +    name : str, optional +        Name of the argument being converted, only used in the error message + +    Returns +    ------- +    y : ``List[int]`` +    """ +    if isinstance(x, Number): +        x = (x,) + +    try: +        x = [operator.index(a) for a in x] +    except TypeError as e: +        name = name or "value" +        raise ValueError(f"{name} must be a scalar or iterable of integers") from e + +    return x + + +def _init_nd_shape_and_axes(x, shape, axes): +    """Handles shape and axes arguments for nd transforms""" +    noshape = shape is None +    noaxes = axes is None + +    if not noaxes: +        axes = _iterable_of_int(axes, 'axes') +        axes = [a + x.ndim if a < 0 else a for a in axes] + +        if any(a >= x.ndim or a < 0 for a in axes): +            raise ValueError("axes exceeds dimensionality of input") +        if len(set(axes)) != len(axes): +            raise ValueError("all axes must be unique") + +    if not 
noshape: + shape = _iterable_of_int(shape, 'shape') + + if axes and len(axes) != len(shape): + raise ValueError("when given, axes and shape arguments" + " have to be of the same length") + if noaxes: + if len(shape) > x.ndim: + raise ValueError("shape requires more axes than are present") + axes = range(x.ndim - len(shape), x.ndim) + + shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)] + elif noaxes: + shape = list(x.shape) + axes = range(x.ndim) + else: + shape = [x.shape[a] for a in axes] + + if any(s < 1 for s in shape): + raise ValueError( + f"invalid number of data points ({shape}) specified") + + return tuple(shape), list(axes) + + +def _asfarray(x): + """ + Convert to array with floating or complex dtype. + + float16 values are also promoted to float32. + """ + if not hasattr(x, "dtype"): + x = np.asarray(x) + + if x.dtype == np.float16: + return np.asarray(x, np.float32) + elif x.dtype.kind not in 'fc': + return np.asarray(x, np.float64) + + # Require native byte order + dtype = x.dtype.newbyteorder('=') + # Always align input + copy = True if not x.flags['ALIGNED'] else copy_if_needed + return np.array(x, dtype=dtype, copy=copy) + +def _datacopied(arr, original): + """ + Strict check for `arr` not sharing any data with `original`, + under the assumption that arr = asarray(original) + """ + if arr is original: + return False + if not isinstance(original, np.ndarray) and hasattr(original, '__array__'): + return False + return arr.base is None + + +def _fix_shape(x, shape, axes): + """Internal auxiliary function for _raw_fft, _raw_fftnd.""" + must_copy = False + + # Build an nd slice with the dimensions to be read from x + index = [slice(None)]*x.ndim + for n, ax in zip(shape, axes): + if x.shape[ax] >= n: + index[ax] = slice(0, n) + else: + index[ax] = slice(0, x.shape[ax]) + must_copy = True + + index = tuple(index) + + if not must_copy: + return x[index], False + + s = list(x.shape) + for n, axis in zip(shape, axes): + s[axis] = n + + z = np.zeros(s, x.dtype) + z[index] = x[index] + return z, True + + +def _fix_shape_1d(x, n, axis): + if n < 1: + raise ValueError( + f"invalid number of data points ({n}) specified") + + return _fix_shape(x, (n,), (axis,)) + + +_NORM_MAP = {None: 0, 'backward': 0, 'ortho': 1, 'forward': 2} + + +def _normalization(norm, forward): + """Returns the pypocketfft normalization mode from the norm argument""" + try: + inorm = _NORM_MAP[norm] + return inorm if forward else (2 - inorm) + except KeyError: + raise ValueError( + f'Invalid norm value {norm!r}, should ' + 'be "backward", "ortho" or "forward"') from None + + +def _workers(workers): + if workers is None: + return getattr(_config, 'default_workers', 1) + + if workers < 0: + if workers >= -_cpu_count: + workers += 1 + _cpu_count + else: + raise ValueError(f"workers value out of range; got {workers}, must not be" + f" less than {-_cpu_count}") + elif workers == 0: + raise ValueError("workers must not be zero") + + return workers + + +@contextlib.contextmanager +def set_workers(workers): + """Context manager for the default number of workers used in `scipy.fft` + + Parameters + ---------- + workers : int + The default number of workers to use + + Examples + -------- + >>> import numpy as np + >>> from scipy import fft, signal + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal((128, 64)) + >>> with fft.set_workers(4): + ... 
y = signal.fftconvolve(x, x) + +    """ +    old_workers = get_workers() +    _config.default_workers = _workers(operator.index(workers)) +    try: +        yield +    finally: +        _config.default_workers = old_workers + + +def get_workers(): +    """Returns the default number of workers within the current context + +    Examples +    -------- +    >>> from scipy import fft +    >>> fft.get_workers() +    1 +    >>> with fft.set_workers(4): +    ...     fft.get_workers() +    4 +    """ +    return getattr(_config, 'default_workers', 1) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/realtransforms.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/realtransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0c616742305444d51258e650344c060129dfab --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/realtransforms.py @@ -0,0 +1,109 @@ +import numpy as np +from . import pypocketfft as pfft +from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied, +                     _fix_shape, _fix_shape_1d, _normalization, _workers) +import functools + + +def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None, +         overwrite_x=False, workers=None, orthogonalize=None): +    """Forward or backward 1-D DCT/DST + +    Parameters +    ---------- +    forward : bool +        Transform direction (determines type and normalisation) +    transform : {pypocketfft.dct, pypocketfft.dst} +        The transform to perform +    """ +    tmp = _asfarray(x) +    overwrite_x = overwrite_x or _datacopied(tmp, x) +    norm = _normalization(norm, forward) +    workers = _workers(workers) + +    if not forward: +        if type == 2: +            type = 3 +        elif type == 3: +            type = 2 + +    if n is not None: +        tmp, copied = _fix_shape_1d(tmp, n, axis) +        overwrite_x = overwrite_x or copied +    elif tmp.shape[axis] < 1: +        raise ValueError(f"invalid number of data points ({tmp.shape[axis]}) specified") + +    out = (tmp if overwrite_x else None) + +    # For complex input, transform real and imaginary components separately +    if np.iscomplexobj(x): +        out = np.empty_like(tmp) if out is None else out +        transform(tmp.real, type, (axis,), norm, out.real, workers) +        transform(tmp.imag, type, (axis,), norm, out.imag, workers) +        return out + +    return transform(tmp, type, (axis,), norm, out, workers, orthogonalize) + + +dct = functools.partial(_r2r, True, pfft.dct) +dct.__name__ = 'dct' +idct = functools.partial(_r2r, False, pfft.dct) +idct.__name__ = 'idct' + +dst = functools.partial(_r2r, True, pfft.dst) +dst.__name__ = 'dst' +idst = functools.partial(_r2r, False, pfft.dst) +idst.__name__ = 'idst' + + +def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None, +          overwrite_x=False, workers=None, orthogonalize=None): +    """Forward or backward nd DCT/DST + +    Parameters +    ---------- +    forward : bool +        Transform direction (determines type and normalisation) +    transform : {pypocketfft.dct, pypocketfft.dst} +        The transform to perform +    """ +    tmp = _asfarray(x) + +    shape, axes = _init_nd_shape_and_axes(tmp, s, axes) +    overwrite_x = overwrite_x or _datacopied(tmp, x) + +    if len(axes) == 0: +        return x + +    tmp, copied = _fix_shape(tmp, shape, axes) +    overwrite_x = overwrite_x or copied + +    if not forward: +        if type == 2: +            type = 3 +        elif type == 3: +            type = 2 + +    norm = _normalization(norm, forward) +    workers = _workers(workers) +    out = (tmp if overwrite_x else None) + +    # For complex input, transform real and imaginary components separately +    if 
np.iscomplexobj(x): + out = np.empty_like(tmp) if out is None else out + transform(tmp.real, type, axes, norm, out.real, workers) + transform(tmp.imag, type, axes, norm, out.imag, workers) + return out + + return transform(tmp, type, axes, norm, out, workers, orthogonalize) + + +dctn = functools.partial(_r2rn, True, pfft.dct) +dctn.__name__ = 'dctn' +idctn = functools.partial(_r2rn, False, pfft.dct) +idctn.__name__ = 'idctn' + +dstn = functools.partial(_r2rn, True, pfft.dst) +dstn.__name__ = 'dstn' +idstn = functools.partial(_r2rn, False, pfft.dst) +idstn.__name__ = 'idstn' diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28357236789a718284f6a7bbe224d4ca1239f165 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f503cb46a433e1115682a9338f1c3c81b3d4c9e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..721a23482370484b93c2c692255477e3cab3bf23 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_basic.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..8960cace3e081368d00efbad77059f91cef4dbdd --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_basic.py @@ -0,0 +1,1005 @@ +# Created by Pearu Peterson, September 2002 + +from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_array_less, + assert_allclose) +import pytest +from pytest import raises as assert_raises +from scipy.fft._pocketfft import (ifft, fft, 
fftn, ifftn, + rfft, irfft, rfftn, irfftn, + hfft, ihfft, hfftn, ihfftn) + +from numpy import (arange, array, asarray, zeros, dot, exp, pi, + swapaxes, cdouble) +import numpy as np +import numpy.fft +from numpy.random import rand + +# "large" composite numbers supported by FFT._PYPOCKETFFT +LARGE_COMPOSITE_SIZES = [ + 2**13, + 2**5 * 3**5, + 2**3 * 3**3 * 5**2, +] +SMALL_COMPOSITE_SIZES = [ + 2, + 2*3*5, + 2*2*3*3, +] +# prime +LARGE_PRIME_SIZES = [ + 2011 +] +SMALL_PRIME_SIZES = [ + 29 +] + + +def _assert_close_in_norm(x, y, rtol, size, rdt): + # helper function for testing + err_msg = f"size: {size} rdt: {rdt}" + assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg) + + +def random(size): + return rand(*size) + +def swap_byteorder(arr): + """Returns the same array with swapped byteorder""" + dtype = arr.dtype.newbyteorder('S') + return arr.astype(dtype) + +def direct_dft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = -arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x) + return y + + +def direct_idft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x)/n + return y + + +def direct_dftn(x): + x = asarray(x) + for axis in range(x.ndim): + x = fft(x, axis=axis) + return x + + +def direct_idftn(x): + x = asarray(x) + for axis in range(x.ndim): + x = ifft(x, axis=axis) + return x + + +def direct_rdft(x): + x = asarray(x) + n = len(x) + w = -arange(n)*(2j*pi/n) + y = zeros(n//2+1, dtype=cdouble) + for i in range(n//2+1): + y[i] = dot(exp(i*w), x) + return y + + +def direct_irdft(x, n): + x = asarray(x) + x1 = zeros(n, dtype=cdouble) + for i in range(n//2+1): + x1[i] = x[i] + if i > 0 and 2*i < n: + x1[n-i] = np.conj(x[i]) + return direct_idft(x1).real + + +def direct_rdftn(x): + return fftn(rfft(x), axes=range(x.ndim - 1)) + + +class _TestFFTBase: + def setup_method(self): + self.cdt = None + self.rdt = None + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt) + y = fft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_dft(x) + assert_array_almost_equal(y,y1) + x = np.array([1,2,3,4+0j,5], dtype=self.cdt) + assert_array_almost_equal(fft(x),direct_dft(x)) + + def test_n_argument_real(self): + x1 = np.array([1,2,3,4], dtype=self.rdt) + x2 = np.array([1,2,3,4], dtype=self.rdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def _test_n_argument_complex(self): + x1 = np.array([1,2,3,4+1j], dtype=self.cdt) + x2 = np.array([1,2,3,4+1j], dtype=self.cdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(n) + y = fft(x.astype(complex)) + y2 = numpy.fft.fft(x) + assert_array_almost_equal(y,y2) + y = fft(x) + assert_array_almost_equal(y,y2) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft, []) + assert_raises(ValueError, fft, [[1,1],[2,2]], -5) + + +class TestLongDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.clongdouble + self.rdt = np.longdouble + + +class TestDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.cdouble + self.rdt = np.float64 + + +class TestSingleFFT(_TestFFTBase): + def setup_method(self): 
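+        # single precision: float32 input is expected to produce complex64 output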
+ self.cdt = np.complex64 + self.rdt = np.float32 + + +class TestFloat16FFT: + + def test_1_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft(x1, n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (4, )) + assert_array_almost_equal(y, direct_dft(x1.astype(np.float32))) + + def test_n_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + x2 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft([x1, x2], n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (2, 4)) + assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32))) + assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32))) + + +class _TestIFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt) + y = ifft(x) + y1 = direct_idft(x) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4+0j,5], self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_definition_real(self): + x = np.array([1,2,3,4,1,2,3,4], self.rdt) + y = ifft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_idft(x) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4,5], dtype=self.rdt) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(n) + y = ifft(x.astype(self.cdt)) + y2 = numpy.fft.ifft(x.astype(self.cdt)) + assert_allclose(y,y2, rtol=self.rtol, atol=self.atol) + y = ifft(x) + assert_allclose(y,y2, rtol=self.rtol, atol=self.atol) + + def test_random_complex(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.cdt) + x = random([size]).astype(self.cdt) + 1j*x + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + + x = (x + 1j*np.random.rand(size)).astype(self.cdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, self.rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, ifft, []) + assert_raises(ValueError, ifft, [[1,1],[2,2]], -5) + + +@pytest.mark.skipif(np.longdouble is np.float64, + reason="Long double is aliased to double") +class TestLongDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.clongdouble + self.rdt = np.longdouble + self.rtol = 1e-10 + self.atol = 1e-10 + + +class TestDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.rtol = 1e-10 + self.atol = 1e-10 + + +class TestSingleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.rtol = 1e-5 + 
self.atol = 1e-4 + + +class _TestRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]: + x = np.array(t, dtype=self.rdt) + y = rfft(x) + y1 = direct_rdft(x) + assert_array_almost_equal(y,y1) + assert_equal(y.dtype, self.cdt) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(n) + y1 = np.fft.rfft(x) + y = rfft(x) + assert_array_almost_equal(y,y1) + + def test_invalid_sizes(self): + assert_raises(ValueError, rfft, []) + assert_raises(ValueError, rfft, [[1,1],[2,2]], -5) + + def test_complex_input(self): + x = np.zeros(10, dtype=self.cdt) + with assert_raises(TypeError, match="x must be a real sequence"): + rfft(x) + + # See gh-5790 + class MockSeries: + def __init__(self, data): + self.data = np.asarray(data) + + def __getattr__(self, item): + try: + return getattr(self.data, item) + except AttributeError as e: + raise AttributeError("'MockSeries' object " + f"has no attribute '{item}'") from e + + def test_non_ndarray_with_dtype(self): + x = np.array([1., 2., 3., 4., 5.]) + xs = _TestRFFTBase.MockSeries(x) + + expected = [1, 2, 3, 4, 5] + rfft(xs) + + # Data should not have been overwritten + assert_equal(x, expected) + assert_equal(xs.data, expected) + +@pytest.mark.skipif(np.longdouble is np.float64, + reason="Long double is aliased to double") +class TestRFFTLongDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.clongdouble + self.rdt = np.longdouble + + +class TestRFFTDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestRFFTSingle(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestIRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x1 = [1,2+3j,4+1j,1+2j,3+4j] + x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j] + x1 = x1_1[:5] + x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j] + x2 = x2_1[:5] + + def _test(x, xr): + y = irfft(np.array(x, dtype=self.cdt), n=len(xr)) + y1 = direct_irdft(x, len(xr)) + assert_equal(y.dtype, self.rdt) + assert_array_almost_equal(y,y1, decimal=self.ndec) + assert_array_almost_equal(y,ifft(xr), decimal=self.ndec) + + _test(x1, x1_1) + _test(x2, x2_1) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2) + x[0] = 0 + if n % 2 == 0: + x[-1] = np.real(x[-1]) + y1 = np.fft.irfft(x) + y = irfft(x) + assert_array_almost_equal(y,y1) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = irfft(rfft(x), n=size) + y2 = rfft(irfft(x, n=(size*2-1))) + assert_equal(y1.dtype, self.rdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x, decimal=self.ndec, + err_msg="size=%d" % size) + assert_array_almost_equal(y2, x, decimal=self.ndec, + err_msg="size=%d" % size) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = irfft(rfft(x), len(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = rfft(irfft(x, 2 * len(x) - 1)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, irfft, []) + 
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5) + + +# self.ndec is bogus; we should have a assert_array_approx_equal for number of +# significant digits + +@pytest.mark.skipif(np.longdouble is np.float64, + reason="Long double is aliased to double") +class TestIRFFTLongDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.ndec = 14 + + +class TestIRFFTDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.ndec = 14 + + +class TestIRFFTSingle(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.ndec = 5 + + +class TestFftnSingle: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float32)) + assert_(y.dtype == np.complex64, + msg="double precision output with single precision") + + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_size_accuracy_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_size_accuracy_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + def test_definition_float16(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float16)) + assert_equal(y.dtype, np.complex64) + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_float16_input_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 5e5) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_float16_input_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2e6) + + +class TestFftn: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(x) + assert_array_almost_equal(y, direct_dftn(x)) + + x = random((20, 26)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + def test_axes_argument(self): + # plane == ji_plane, x== kji_space + plane1 = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + plane2 = [[10, 11, 12], + [13, 14, 15], + [16, 17, 18]] + plane3 = [[19, 20, 21], + [22, 23, 24], + [25, 26, 27]] + ki_plane1 = [[1, 2, 3], + [10, 11, 12], + [19, 20, 21]] + ki_plane2 = [[4, 5, 6], + [13, 14, 15], + [22, 23, 24]] + ki_plane3 = [[7, 8, 9], + [16, 17, 18], + [25, 26, 27]] + jk_plane1 = [[1, 10, 
19], + [4, 13, 22], + [7, 16, 25]] + jk_plane2 = [[2, 11, 20], + [5, 14, 23], + [8, 17, 26]] + jk_plane3 = [[3, 12, 21], + [6, 15, 24], + [9, 18, 27]] + kj_plane1 = [[1, 4, 7], + [10, 13, 16], [19, 22, 25]] + kj_plane2 = [[2, 5, 8], + [11, 14, 17], [20, 23, 26]] + kj_plane3 = [[3, 6, 9], + [12, 15, 18], [21, 24, 27]] + ij_plane1 = [[1, 4, 7], + [2, 5, 8], + [3, 6, 9]] + ij_plane2 = [[10, 13, 16], + [11, 14, 17], + [12, 15, 18]] + ij_plane3 = [[19, 22, 25], + [20, 23, 26], + [21, 24, 27]] + ik_plane1 = [[1, 10, 19], + [2, 11, 20], + [3, 12, 21]] + ik_plane2 = [[4, 13, 22], + [5, 14, 23], + [6, 15, 24]] + ik_plane3 = [[7, 16, 25], + [8, 17, 26], + [9, 18, 27]] + ijk_space = [jk_plane1, jk_plane2, jk_plane3] + ikj_space = [kj_plane1, kj_plane2, kj_plane3] + jik_space = [ik_plane1, ik_plane2, ik_plane3] + jki_space = [ki_plane1, ki_plane2, ki_plane3] + kij_space = [ij_plane1, ij_plane2, ij_plane3] + x = array([plane1, plane2, plane3]) + + assert_array_almost_equal(fftn(x), + fftn(x, axes=(-3, -2, -1))) # kji_space + assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2))) + assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1))) + y = fftn(x, axes=(2, 1, 0)) # ijk_space + assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space)) + y = fftn(x, axes=(2, 0, 1)) # ikj_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2), + fftn(ikj_space)) + y = fftn(x, axes=(1, 2, 0)) # jik_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2), + fftn(jik_space)) + y = fftn(x, axes=(1, 0, 2)) # jki_space + assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space)) + y = fftn(x, axes=(0, 2, 1)) # kij_space + assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space)) + + y = fftn(x, axes=(-2, -1)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(1, 2)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(-3, -2)) # kj_plane + assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0]) + assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1]) + assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2]) + + y = fftn(x, axes=(-3, -1)) # ki_plane + assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :]) + assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :]) + assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :]) + + y = fftn(x, axes=(-1, -2)) # ij_plane + assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1)) + assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1)) + assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1)) + + y = fftn(x, axes=(-1, -3)) # ik_plane + assert_array_almost_equal(fftn(ik_plane1), + swapaxes(y[:, 0, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane2), + swapaxes(y[:, 1, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane3), + swapaxes(y[:, 2, :], -1, -2)) + + y = fftn(x, axes=(-2, -3)) # jk_plane + assert_array_almost_equal(fftn(jk_plane1), + swapaxes(y[:, :, 0], -1, -2)) + assert_array_almost_equal(fftn(jk_plane2), + swapaxes(y[:, :, 1], -1, -2)) + assert_array_almost_equal(fftn(jk_plane3), + swapaxes(y[:, :, 2], -1, -2)) + + y = fftn(x, axes=(-1,)) # i_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :]) + y = fftn(x, axes=(-2,)) # j_line + for i in range(3): + for j in range(3): + 
assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j]) + y = fftn(x, axes=(0,)) # k_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j]) + + y = fftn(x, axes=()) # point + assert_array_almost_equal(y, x) + + def test_shape_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6]] + large_x1 = [[1, 2, 3, 0], + [4, 5, 6, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + + y = fftn(small_x, s=(4, 4)) + assert_array_almost_equal(y, fftn(large_x1)) + + y = fftn(small_x, s=(3, 4)) + assert_array_almost_equal(y, fftn(large_x1[:-1])) + + def test_shape_axes_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + large_x1 = array([[1, 2, 3, 0], + [4, 5, 6, 0], + [7, 8, 9, 0], + [0, 0, 0, 0]]) + y = fftn(small_x, s=(4, 4), axes=(-2, -1)) + assert_array_almost_equal(y, fftn(large_x1)) + y = fftn(small_x, s=(4, 4), axes=(-1, -2)) + + assert_array_almost_equal(y, swapaxes( + fftn(swapaxes(large_x1, -1, -2)), -1, -2)) + + def test_shape_axes_argument2(self): + # Change shape of the last axis + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-1,), s=(8,)) + assert_array_almost_equal(y, fft(x, axis=-1, n=8)) + + # Change shape of an arbitrary axis which is not the last one + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-2,), s=(8,)) + assert_array_almost_equal(y, fft(x, axis=-2, n=8)) + + # Change shape of axes: cf #244, where shape and axes were mixed up + x = numpy.random.random((4, 4, 2)) + y = fftn(x, axes=(-3, -2), s=(8, 8)) + assert_array_almost_equal(y, + numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8))) + + def test_shape_argument_more(self): + x = zeros((4, 4, 2)) + with assert_raises(ValueError, + match="shape requires more axes than are present"): + fftn(x, s=(8, 8, 2, 1)) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + fftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + fftn([[1, 1], [2, 2]], (4, -3)) + + def test_no_axes(self): + x = numpy.random.random((2,2,2)) + assert_allclose(fftn(x, axes=[]), x, atol=1e-7) + + def test_regression_244(self): + """FFT returns wrong result with axes parameter.""" + # fftn (and hence fft2) used to break when both axes and shape were used + x = numpy.ones((4, 4, 2)) + y = fftn(x, s=(8, 8), axes=(-3, -2)) + y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2)) + assert_allclose(y, y_r) + + +class TestIfftn: + dtype = None + cdtype = None + + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize('dtype,cdtype,maxnlp', + [(np.float64, np.complex128, 2000), + (np.float32, np.complex64, 3500)]) + def test_definition(self, dtype, cdtype, maxnlp): + x = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], dtype=dtype) + y = ifftn(x) + assert_equal(y.dtype, cdtype) + assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp) + + x = random((20, 26)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + @pytest.mark.parametrize('maxnlp', [2000, 3500]) + @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92]) + def test_random_complex(self, maxnlp, size): + x = random([size, size]) + 1j*random([size, size]) + assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp) + assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data 
points" + r" \(\[1, 0\]\) specified"): + ifftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + ifftn([[1, 1], [2, 2]], (4, -3)) + + def test_no_axes(self): + x = numpy.random.random((2,2,2)) + assert_allclose(ifftn(x, axes=[]), x, atol=1e-7) + +class TestRfftn: + dtype = None + cdtype = None + + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize('dtype,cdtype,maxnlp', + [(np.float64, np.complex128, 2000), + (np.float32, np.complex64, 3500)]) + def test_definition(self, dtype, cdtype, maxnlp): + x = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], dtype=dtype) + y = rfftn(x) + assert_equal(y.dtype, cdtype) + assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp) + + x = random((20, 26)) + assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp) + + @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92]) + def test_random(self, size): + x = random([size, size]) + assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10) + + @pytest.mark.parametrize('func', [rfftn, irfftn]) + def test_invalid_sizes(self, func): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + func([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + func([[1, 1], [2, 2]], (4, -3)) + + @pytest.mark.parametrize('func', [rfftn, irfftn]) + def test_no_axes(self, func): + with assert_raises(ValueError, + match="at least 1 axis must be transformed"): + func([], axes=[]) + + def test_complex_input(self): + with assert_raises(TypeError, match="x must be a real sequence"): + rfftn(np.zeros(10, dtype=np.complex64)) + + +class FakeArray: + def __init__(self, data): + self._data = data + self.__array_interface__ = data.__array_interface__ + + +class FakeArray2: + def __init__(self, data): + self._data = data + + def __array__(self, dtype=None, copy=None): + return self._data + +# TODO: Is this test actually valuable? 
The behavior it's testing shouldn't be +# relied upon by users except for overwrite_x = False +class TestOverwrite: + """Check input overwrite behavior of the FFT functions.""" + + real_dtypes = [np.float32, np.float64, np.longdouble] + dtypes = real_dtypes + [np.complex64, np.complex128, np.clongdouble] + fftsizes = [8, 16, 32] + + def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite): + x2 = x.copy() + for fake in [lambda x: x, FakeArray, FakeArray2]: + routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not should_overwrite: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes, + fftsize, overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + should_overwrite = (overwrite_x + and dtype in overwritable_dtypes + and fftsize <= shape[axis]) + self._check(data, routine, fftsize, axis, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = (np.clongdouble, np.complex128, np.complex64) + self._check_1d(fft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(ifft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + @pytest.mark.parametrize('dtype', real_dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = self.real_dtypes + self._check_1d(irfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(rfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes, + overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + def fftshape_iter(shp): + if len(shp) <= 0: + yield () + else: + for j in (shp[0]//2, shp[0], shp[0]*2): + for rest in fftshape_iter(shp[1:]): + yield (j,) + rest + + def part_shape(shape, axes): + if axes is None: + return shape + else: + return tuple(np.take(shape, axes)) + + def should_overwrite(data, shape, axes): + s = part_shape(data.shape, axes) + return (overwrite_x and + np.prod(shape) <= np.prod(s) + and dtype in overwritable_dtypes) + + for fftshape in fftshape_iter(part_shape(shape, axes)): + self._check(data, routine, fftshape, axes, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite(data, fftshape, axes)) + if data.ndim > 1: + # check fortran order + self._check(data.T, routine, fftshape, axes, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite( + data.T, fftshape, axes)) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + 
@pytest.mark.parametrize('shape,axes', [((16,), None), + ((16,), (0,)), + ((16, 2), (0,)), + ((2, 16), (1,)), + ((8, 16), None), + ((8, 16), (0, 1)), + ((8, 16, 2), (0, 1)), + ((8, 16, 2), (1, 2)), + ((8, 16, 2), (0,)), + ((8, 16, 2), (1,)), + ((8, 16, 2), (2,)), + ((8, 16, 2), None), + ((8, 16, 2), (0, 1, 2))]) + def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes): + overwritable = (np.clongdouble, np.complex128, np.complex64) + self._check_nd_one(fftn, dtype, shape, axes, overwritable, + overwrite_x) + self._check_nd_one(ifftn, dtype, shape, axes, overwritable, + overwrite_x) + + +@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn, + rfft, irfft, rfftn, irfftn]) +def test_invalid_norm(func): + x = np.arange(10, dtype=float) + with assert_raises(ValueError, + match='Invalid norm value \'o\', should be' + ' "backward", "ortho" or "forward"'): + func(x, norm='o') + + +@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn, + irfft, irfftn, hfft, hfftn]) +def test_swapped_byte_order_complex(func): + rng = np.random.RandomState(1234) + x = rng.rand(10) + 1j * rng.rand(10) + assert_allclose(func(swap_byteorder(x)), func(x)) + + +@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn]) +def test_swapped_byte_order_real(func): + rng = np.random.RandomState(1234) + x = rng.rand(10) + assert_allclose(func(swap_byteorder(x)), func(x)) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_real_transforms.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..2cb47f40c6bc0a251a79bb3660fcc9a0f1b10725 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/tests/test_real_transforms.py @@ -0,0 +1,494 @@ +from os.path import join, dirname +from typing import Callable, Union + +import numpy as np +from numpy.testing import ( + assert_array_almost_equal, assert_equal, assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.fft._pocketfft.realtransforms import ( + dct, idct, dst, idst, dctn, idctn, dstn, idstn) + +fftpack_test_dir = join(dirname(__file__), '..', '..', '..', 'fftpack', 'tests') + +MDATA_COUNT = 8 +FFTWDATA_COUNT = 14 + +def is_longdouble_binary_compatible(): + try: + one = np.frombuffer( + b'\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x00\x00\x00\x00\x00', + dtype=' decimal +dec_map: DecMapType = { + # DCT + (dct, np.float64, 1): 13, + (dct, np.float32, 1): 6, + + (dct, np.float64, 2): 14, + (dct, np.float32, 2): 5, + + (dct, np.float64, 3): 14, + (dct, np.float32, 3): 5, + + (dct, np.float64, 4): 13, + (dct, np.float32, 4): 6, + + # IDCT + (idct, np.float64, 1): 14, + (idct, np.float32, 1): 6, + + (idct, np.float64, 2): 14, + (idct, np.float32, 2): 5, + + (idct, np.float64, 3): 14, + (idct, np.float32, 3): 5, + + (idct, np.float64, 4): 14, + (idct, np.float32, 4): 6, + + # DST + (dst, np.float64, 1): 13, + (dst, np.float32, 1): 6, + + (dst, np.float64, 2): 14, + (dst, np.float32, 2): 6, + + (dst, np.float64, 3): 14, + (dst, np.float32, 3): 7, + + (dst, np.float64, 4): 13, + (dst, np.float32, 4): 5, + + # IDST + (idst, np.float64, 1): 14, + (idst, np.float32, 1): 6, + + (idst, np.float64, 2): 14, + (idst, np.float32, 2): 6, + + (idst, np.float64, 3): 14, + (idst, np.float32, 3): 6, + + (idst, np.float64, 4): 14, + (idst, np.float32, 4): 6, +} + +for k,v in 
dec_map.copy().items(): + if k[1] == np.float64: + dec_map[(k[0], np.longdouble, k[2])] = v + elif k[1] == np.float32: + dec_map[(k[0], int, k[2])] = v + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +class TestDCT: + def test_definition(self, rdt, type, fftwdata_size, reference_data): + x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt, reference_data) + y = dct(x, type=type) + assert_equal(y.dtype, dt) + dec = dec_map[(dct, rdt, type)] + assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec)) + + @pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64]) + def test_axis(self, rdt, type, size): + nt = 2 + dec = dec_map[(dct, rdt, type)] + x = np.random.randn(nt, size) + y = dct(x, type=type) + for j in range(nt): + assert_array_almost_equal(y[j], dct(x[j], type=type), + decimal=dec) + + x = x.T + y = dct(x, axis=0, type=type) + for j in range(nt): + assert_array_almost_equal(y[:,j], dct(x[:,j], type=type), + decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct1_definition_ortho(rdt, mdata_x): + # Test orthonormal mode. + dec = dec_map[(dct, rdt, 1)] + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dct(x, norm='ortho', type=1) + y2 = naive_dct1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct2_definition_matlab(mdata_xy, rdt): + # Test correspondence with MATLAB (orthonormal mode). + dt = np.result_type(np.float32, rdt) + x = np.array(mdata_xy[0], dtype=dt) + + yr = mdata_xy[1] + y = dct(x, norm="ortho", type=2) + dec = dec_map[(dct, rdt, 2)] + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, yr, decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct3_definition_ortho(mdata_x, rdt): + # Test orthonormal mode. + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dct(x, norm='ortho', type=2) + xi = dct(y, norm="ortho", type=3) + dec = dec_map[(dct, rdt, 3)] + assert_equal(xi.dtype, dt) + assert_array_almost_equal(xi, x, decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dct4_definition_ortho(mdata_x, rdt): + # Test orthonormal mode.
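+ # The DCT-IV result is checked against the naive_dct4 reference + # implementation; the tolerance is taken from dec_map for (dct, rdt, 4).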
+ x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dct(x, norm='ortho', type=4) + y2 = naive_dct4(x, norm='ortho') + dec = dec_map[(dct, rdt, 4)] + assert_equal(y.dtype, dt) + assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +def test_idct_definition(fftwdata_size, rdt, type, reference_data): + xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt, reference_data) + x = idct(yr, type=type) + dec = dec_map[(idct, rdt, type)] + assert_equal(x.dtype, dt) + assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +def test_dst_definition(fftwdata_size, rdt, type, reference_data): + xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt, reference_data) + y = dst(xr, type=type) + dec = dec_map[(dst, rdt, type)] + assert_equal(y.dtype, dt) + assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dst1_definition_ortho(rdt, mdata_x): + # Test orthonormal mode. + dec = dec_map[(dst, rdt, 1)] + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dst(x, norm='ortho', type=1) + y2 = naive_dst1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec)) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +def test_dst4_definition_ortho(rdt, mdata_x): + # Test orthonormal mode. + dec = dec_map[(dst, rdt, 4)] + x = np.array(mdata_x, dtype=rdt) + dt = np.result_type(np.float32, rdt) + y = dst(x, norm='ortho', type=4) + y2 = naive_dst4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, y2, decimal=dec) + + +@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +def test_idst_definition(fftwdata_size, rdt, type, reference_data): + xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt, reference_data) + x = idst(yr, type=type) + dec = dec_map[(idst, rdt, type)] + assert_equal(x.dtype, dt) + assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec)) + + +@pytest.mark.parametrize('routine', [dct, dst, idct, idst]) +@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) +@pytest.mark.parametrize('shape, axis', [ + ((16,), -1), ((16, 2), 0), ((2, 16), 1) +]) +@pytest.mark.parametrize('type', [1, 2, 3, 4]) +@pytest.mark.parametrize('overwrite_x', [True, False]) +@pytest.mark.parametrize('norm', [None, 'ortho']) +def test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x): + # Check input overwrite behavior + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + x = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + x = np.random.randn(*shape) + x = x.astype(dtype) + x2 = x.copy() + routine(x2, type, None, axis, norm, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, None, axis, overwrite_x) + if not overwrite_x: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + +class Test_DCTN_IDCTN: + dec = 14 + dct_type = [1, 2, 3, 4] + norms = [None, 'backward', 'ortho', 'forward'] + rstate = np.random.RandomState(1234) + shape = (32, 16) + data = rstate.randn(*shape) + + 
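+ # A single fixed RandomState(1234) array of shape (32, 16) is shared by + # all round-trip and 2-D reference checks in this class.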
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [None, + 1, (1,), [1], + 0, (0,), [0], + (0, 1), [0, 1], + (-2, -1), [-2, -1]]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', ['ortho']) + def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm): + tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) + tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) + assert_array_almost_equal(self.data, tmp, decimal=12) + + @pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm): + y1 = funcn(self.data, type=dct_type, axes=None, norm=norm) + y2 = ref_2d(func, self.data, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm): + fdata = dctn(self.data, type=dct_type, norm=norm) + y1 = funcn(fdata, type=dct_type, norm=norm) + y2 = ref_2d(func, fdata, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + def test_axes_and_shape(self, fforward, finverse): + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, s=self.data.shape[0], axes=(0, 1)) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, s=self.data.shape, axes=0) + + @pytest.mark.parametrize('fforward', [dctn, dstn]) + def test_shape(self, fforward): + tmp = fforward(self.data, s=(128, 128), axes=None) + assert_equal(tmp.shape, (128, 128)) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [1, (1,), [1], + 0, (0,), [0]]) + def test_shape_is_none_with_axes(self, fforward, finverse, axes): + tmp = fforward(self.data, s=None, axes=axes, norm='ortho') + tmp = finverse(tmp, s=None, axes=axes, norm='ortho') + assert_array_almost_equal(self.data, tmp, decimal=self.dec) + + +@pytest.mark.parametrize('func', [dct, dctn, idct, idctn, + dst, dstn, idst, idstn]) +def test_swapped_byte_order(func): + rng = np.random.RandomState(1234) + x = rng.rand(10) + swapped_dt = x.dtype.newbyteorder('S') + assert_allclose(func(x.astype(swapped_dt)), func(x)) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..1c7a3d683dd78d3227a7de88f5c47569d2f4e17f --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms.py @@ -0,0 +1,693 @@ +from ._basic import _dispatch +from scipy._lib.uarray import Dispatchable +import numpy as np + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + + +@_dispatch +def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, *, orthogonalize=None): + """ + Return multidimensional Discrete Cosine Transform along 
the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are None, + `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is + ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `s` is -1, the size of the corresponding dimension of + `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the DCT is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idctn : Inverse multidimensional DCT + + Notes + ----- + For full details of the DCT types and normalization modes, as well as + references, see `dct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return multidimensional Inverse Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are + None, `s` is ``x.shape``; if `s` is None but `axes` is + not None, then `s` is ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `s` is -1, the size of the corresponding dimension of + `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the IDCT is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDCT variant (see Notes). 
+ Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dctn : multidimensional DCT + + Notes + ----- + For full details of the IDCT types and normalization modes, as well as + references, see `idct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return multidimensional Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are None, + `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is + ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. + If any element of `s` is -1, the size of the corresponding dimension + of `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the DST is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idstn : Inverse multidimensional DST + + Notes + ----- + For full details of the DST types and normalization modes, as well as + references, see `dst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return multidimensional Inverse Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + s : int or array_like of ints or None, optional + The shape of the result. If both `s` and `axes` (see below) are None, + `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is + ``numpy.take(x.shape, axes, axis=0)``. + If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros. + If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length + ``s[i]``. 
+ If any element of `s` is -1, the size of the corresponding dimension of + `x` is used. + axes : int or array_like of ints or None, optional + Axes over which the IDST is computed. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dstn : multidimensional DST + + Notes + ----- + For full details of the IDST types and normalization modes, as well as + references, see `idst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fft import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y))) + True + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, + orthogonalize=None): + r"""Return the Discrete Cosine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idct : Inverse DCT + + Notes + ----- + For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to + MATLAB ``dct(x)``. + + .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the direct Fourier transform. To recover + it you must specify ``orthogonalize=False``. + + For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 1, 2 and 3 means the transform definition is + modified to give orthogonality of the DCT matrix (see below). + + For ``norm="backward"``, there is no scaling on `dct` and the `idct` is + scaled by ``1/N`` where ``N`` is the "logical" size of the DCT. 
For + ``norm="forward"`` the ``1/N`` normalization is applied to the forward + `dct` instead and the `idct` is unnormalized. + + There are, theoretically, 8 types of the DCT; only the first 4 types are + implemented in SciPy. 'The' DCT generally refers to DCT type 2, and 'the' + Inverse DCT generally refers to DCT type 3. + + **Type I** + + There are several definitions of the DCT-I; we use the following + (for ``norm="backward"``) + + .. math:: + + y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left( + \frac{\pi k n}{N-1} \right) + + If ``orthogonalize=True``, ``x[0]`` and ``x[N-1]`` are multiplied by a + scaling factor of :math:`\sqrt{2}`, and ``y[0]`` and ``y[N-1]`` are divided + by :math:`\sqrt{2}`. When combined with ``norm="ortho"``, this makes the + corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``). + + .. note:: + The DCT-I is only supported for input size > 1. + + **Type II** + + There are several definitions of the DCT-II; we use the following + (for ``norm="backward"``) + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right) + + If ``orthogonalize=True``, ``y[0]`` is divided by :math:`\sqrt{2}` which, + when combined with ``norm="ortho"``, makes the corresponding matrix of + coefficients orthonormal (``O @ O.T = np.eye(N)``). + + **Type III** + + There are several definitions; we use the following (for + ``norm="backward"``) + + .. math:: + + y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right) + + If ``orthogonalize=True``, the ``x[0]`` term is multiplied by + :math:`\sqrt{2}` which, when combined with ``norm="ortho"``, makes the + corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``). + + The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up + to a factor ``2N``. The orthonormalized DCT-III is exactly the inverse of + the orthonormalized DCT-II. + + **Type IV** + + There are several definitions of the DCT-IV; we use the following + (for ``norm="backward"``) + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right) + + ``orthogonalize`` has no effect here, as the DCT-IV matrix is already + orthogonal up to a scale factor of ``2N``. + + References + ---------- + .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J. + Makhoul, `IEEE Transactions on Acoustics, Speech and Signal + Processing` vol. 28(1), pp. 27-34, + :doi:`10.1109/TASSP.1980.1163351` (1980). + .. [2] Wikipedia, "Discrete cosine transform", + https://en.wikipedia.org/wiki/Discrete_cosine_transform + + Examples + -------- + The Type 1 DCT is equivalent to the FFT (though faster) for real, + even-symmetrical inputs. The output is also real and even-symmetrical. + Half of the FFT input is used to generate half of the FFT output: + + >>> from scipy.fft import fft, dct + >>> import numpy as np + >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real + array([ 30., -8., 6., -2., 6., -8.]) + >>> dct(np.array([4., 3., 5., 10.]), 1) + array([ 30., -8., 6., -2.]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return the Inverse Discrete Cosine Transform of an arbitrary type sequence. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. 
If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the idct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDCT variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + idct : ndarray of real + The transformed input array. + + See Also + -------- + dct : Forward DCT + + Notes + ----- + For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to + MATLAB ``idct(x)``. + + .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the inverse direct Fourier transform. To + recover it you must specify ``orthogonalize=False``. + + For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 1, 2 and 3 means the transform definition is + modified to give orthogonality of the IDCT matrix (see `dct` for the full + definitions). + + 'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III. + + The IDCT is equivalent to a normal DCT except for the normalization and + type. DCT type 1 and 4 are their own inverse and DCTs 2 and 3 are each + other's inverses. + + Examples + -------- + The Type 1 DCT is equivalent to the DFT for real, even-symmetrical + inputs. The output is also real and even-symmetrical. Half of the IFFT + input is used to generate half of the IFFT output: + + >>> from scipy.fft import ifft, idct + >>> import numpy as np + >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real + array([ 4., 3., 5., 10., 5., 3.]) + >>> idct(np.array([ 30., -8., 6., -2.]), 1) + array([ 4., 3., 5., 10.]) + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, + orthogonalize=None): + r""" + Return the Discrete Sine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dst is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized DST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. 
versionadded:: 1.8.0 + + Returns + ------- + dst : ndarray of reals + The transformed input array. + + See Also + -------- + idst : Inverse DST + + Notes + ----- + .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the direct Fourier transform. To recover + it you must specify ``orthogonalize=False``. + + For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 2 and 3 means the transform definition is + modified to give orthogonality of the DST matrix (see below). + + For ``norm="backward"``, there is no scaling on the `dst` and the `idst` is + scaled by ``1/N`` where ``N`` is the "logical" size of the DST. + + There are, theoretically, 8 types of the DST for different combinations of + even/odd boundary conditions and boundary offsets [1]_; only the first + 4 types are implemented in SciPy. + + **Type I** + + There are several definitions of the DST-I; we use the following for + ``norm="backward"``. DST-I assumes the input is odd around :math:`n=-1` and + :math:`n=N`. + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right) + + Note that the DST-I is only supported for input size > 1. + The (unnormalized) DST-I is its own inverse, up to a factor :math:`2(N+1)`. + The orthonormalized DST-I is exactly its own inverse. + + ``orthogonalize`` has no effect here, as the DST-I matrix is already + orthogonal up to a scale factor of ``2(N+1)``. + + **Type II** + + There are several definitions of the DST-II; we use the following for + ``norm="backward"``. DST-II assumes the input is odd around :math:`n=-1/2` and + :math:`n=N-1/2`; the output is odd around :math:`k=-1` and even around :math:`k=N-1` + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right) + + If ``orthogonalize=True``, ``y[-1]`` is divided by :math:`\sqrt{2}` which, when + combined with ``norm="ortho"``, makes the corresponding matrix of + coefficients orthonormal (``O @ O.T = np.eye(N)``). + + **Type III** + + There are several definitions of the DST-III; we use the following (for + ``norm="backward"``). DST-III assumes the input is odd around :math:`n=-1` and + even around :math:`n=N-1` + + .. math:: + + y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left( + \frac{\pi(2k+1)(n+1)}{2N}\right) + + If ``orthogonalize=True``, ``x[-1]`` is multiplied by :math:`\sqrt{2}` + which, when combined with ``norm="ortho"``, makes the corresponding matrix + of coefficients orthonormal (``O @ O.T = np.eye(N)``). + + The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up + to a factor :math:`2N`. The orthonormalized DST-III is exactly the inverse of the + orthonormalized DST-II. + + **Type IV** + + There are several definitions of the DST-IV; we use the following (for + ``norm="backward"``). DST-IV assumes the input is odd around :math:`n=-0.5` and + even around :math:`n=N-0.5` + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right) + + ``orthogonalize`` has no effect here, as the DST-IV matrix is already + orthogonal up to a scale factor of ``2N``. + + The (unnormalized) DST-IV is its own inverse, up to a factor :math:`2N`. The + orthonormalized DST-IV is exactly its own inverse. + + References + ---------- + .. 
[1] Wikipedia, "Discrete sine transform", + https://en.wikipedia.org/wiki/Discrete_sine_transform + + """ + return (Dispatchable(x, np.ndarray),) + + +@_dispatch +def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, + workers=None, orthogonalize=None): + """ + Return the Inverse Discrete Sine Transform of an arbitrary type sequence. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the idst is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see Notes). Default is "backward". + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + workers : int, optional + Maximum number of workers to use for parallel computation. If negative, + the value wraps around from ``os.cpu_count()``. + See :func:`~scipy.fft.fft` for more details. + orthogonalize : bool, optional + Whether to use the orthogonalized IDST variant (see Notes). + Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise. + + .. versionadded:: 1.8.0 + + Returns + ------- + idst : ndarray of real + The transformed input array. + + See Also + -------- + dst : Forward DST + + Notes + ----- + .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct + correspondence with the inverse direct Fourier transform. + + For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same + overall factor in both directions. By default, the transform is also + orthogonalized which for types 2 and 3 means the transform definition is + modified to give orthogonality of the DST matrix (see `dst` for the full + definitions). + + 'The' IDST is the IDST-II, which is the same as the normalized DST-III. + + The IDST is equivalent to a normal DST except for the normalization and + type. DST type 1 and 4 are their own inverse and DSTs 2 and 3 are each + other's inverses. + + """ + return (Dispatchable(x, np.ndarray),) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..2042453733bec54860974cc1e20ba908e8c9b94d --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py @@ -0,0 +1,63 @@ +from scipy._lib._array_api import array_namespace +import numpy as np +from . 
import _pocketfft + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + + +def _execute(pocketfft_func, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize): + xp = array_namespace(x) + x = np.asarray(x) + y = pocketfft_func(x, type, s, axes, norm, + overwrite_x=overwrite_x, workers=workers, + orthogonalize=orthogonalize) + return xp.asarray(y) + + +def dctn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, orthogonalize=None): + return _execute(_pocketfft.dctn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def idctn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, orthogonalize=None): + return _execute(_pocketfft.idctn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def dstn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.dstn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def idstn(x, type=2, s=None, axes=None, norm=None, + overwrite_x=False, workers=None, *, orthogonalize=None): + return _execute(_pocketfft.idstn, x, type, s, axes, norm, + overwrite_x, workers, orthogonalize) + + +def dct(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.dct, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) + + +def idct(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.idct, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) + + +def dst(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.dst, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) + + +def idst(x, type=2, n=None, axis=-1, norm=None, + overwrite_x=False, workers=None, orthogonalize=None): + return _execute(_pocketfft.idst, x, type, n, axis, norm, + overwrite_x, workers, orthogonalize) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..181be349ea606f3343ac837c9186054823e0906b Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2a369d0c541f1b1b092380e2f4b87027d3e2251 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc differ diff --git 
a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c62f30885c149a30b83e651142083e3ae54d8b9e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a6fc683658a7afdf037cdd1505c4cad3386517 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd766ae49b45b6948274f705f4027bc3b09e0e33 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551309aff4d6953b24ba1483e38abfdba749cc48 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7584c6626ad6d758c43737e522b1005553cce21d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77faac4896e74432757e3cf6704af304a59475da Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..c57a88e0af291ffd68a2a1d62218e8c9459986d5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py @@ -0,0 +1,92 @@ +import numpy as np +import scipy.fft + +class _MockFunction: + def __init__(self, return_value = None): + self.number_calls = 0 + self.return_value = return_value + self.last_args = ([], {}) + + def __call__(self, *args, **kwargs): + self.number_calls += 1 + self.last_args = (args, kwargs) + return self.return_value + + +fft = _MockFunction(np.random.random(10)) +fft2 = _MockFunction(np.random.random(10)) +fftn = _MockFunction(np.random.random(10)) + +ifft = _MockFunction(np.random.random(10)) +ifft2 = _MockFunction(np.random.random(10)) +ifftn = _MockFunction(np.random.random(10)) + +rfft = _MockFunction(np.random.random(10)) +rfft2 = _MockFunction(np.random.random(10)) +rfftn = _MockFunction(np.random.random(10)) + +irfft = _MockFunction(np.random.random(10)) +irfft2 = _MockFunction(np.random.random(10)) +irfftn = _MockFunction(np.random.random(10)) + +hfft = _MockFunction(np.random.random(10)) +hfft2 = _MockFunction(np.random.random(10)) +hfftn = _MockFunction(np.random.random(10)) + +ihfft = _MockFunction(np.random.random(10)) +ihfft2 = _MockFunction(np.random.random(10)) +ihfftn = _MockFunction(np.random.random(10)) + +dct = _MockFunction(np.random.random(10)) +idct = _MockFunction(np.random.random(10)) +dctn = _MockFunction(np.random.random(10)) +idctn = _MockFunction(np.random.random(10)) + +dst = _MockFunction(np.random.random(10)) +idst = _MockFunction(np.random.random(10)) +dstn = _MockFunction(np.random.random(10)) +idstn = _MockFunction(np.random.random(10)) + +fht = _MockFunction(np.random.random(10)) +ifht = _MockFunction(np.random.random(10)) + + +__ua_domain__ = "numpy.scipy.fft" + + +_implements = { + scipy.fft.fft: fft, + scipy.fft.fft2: fft2, + scipy.fft.fftn: fftn, + scipy.fft.ifft: ifft, + scipy.fft.ifft2: ifft2, + scipy.fft.ifftn: ifftn, + scipy.fft.rfft: rfft, + scipy.fft.rfft2: rfft2, + scipy.fft.rfftn: rfftn, + scipy.fft.irfft: irfft, + scipy.fft.irfft2: irfft2, + scipy.fft.irfftn: irfftn, + scipy.fft.hfft: hfft, + scipy.fft.hfft2: hfft2, + scipy.fft.hfftn: hfftn, + scipy.fft.ihfft: ihfft, + scipy.fft.ihfft2: ihfft2, + scipy.fft.ihfftn: ihfftn, + scipy.fft.dct: dct, + scipy.fft.idct: idct, + scipy.fft.dctn: dctn, + scipy.fft.idctn: idctn, + scipy.fft.dst: dst, + scipy.fft.idst: idst, + scipy.fft.dstn: dstn, + scipy.fft.idstn: idstn, + scipy.fft.fht: fht, + scipy.fft.ifht: ifht +} + + +def __ua_function__(method, args, kwargs): + fn = _implements.get(method) + return (fn(*args, **kwargs) if fn is not None + else NotImplemented) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..352ca8ff2a7ab1181b0a3226663dad7ef36ac0fe --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py @@ -0,0 +1,98 @@ +from functools import partial + +import numpy as np +import scipy.fft +from scipy.fft import _fftlog, _pocketfft, set_backend +from scipy.fft.tests import mock_backend + +from numpy.testing import 
assert_allclose, assert_equal +import pytest + +fnames = ('fft', 'fft2', 'fftn', + 'ifft', 'ifft2', 'ifftn', + 'rfft', 'rfft2', 'rfftn', + 'irfft', 'irfft2', 'irfftn', + 'dct', 'idct', 'dctn', 'idctn', + 'dst', 'idst', 'dstn', 'idstn', + 'fht', 'ifht') + +np_funcs = (np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn, + np.fft.rfft, np.fft.rfft2, np.fft.rfftn, + np.fft.irfft, np.fft.irfft2, np.fft.irfftn, + np.fft.hfft, _pocketfft.hfft2, _pocketfft.hfftn, # np has no hfftn + np.fft.ihfft, _pocketfft.ihfft2, _pocketfft.ihfftn, + _pocketfft.dct, _pocketfft.idct, _pocketfft.dctn, _pocketfft.idctn, + _pocketfft.dst, _pocketfft.idst, _pocketfft.dstn, _pocketfft.idstn, + # must provide required kwargs for fht, ifht + partial(_fftlog.fht, dln=2, mu=0.5), + partial(_fftlog.ifht, dln=2, mu=0.5)) + +funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn, + scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn, + scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn, + scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn, + scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn, + scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn, + scipy.fft.dct, scipy.fft.idct, scipy.fft.dctn, scipy.fft.idctn, + scipy.fft.dst, scipy.fft.idst, scipy.fft.dstn, scipy.fft.idstn, + # must provide required kwargs for fht, ifht + partial(scipy.fft.fht, dln=2, mu=0.5), + partial(scipy.fft.ifht, dln=2, mu=0.5)) + +mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn, + mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn, + mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn, + mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn, + mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn, + mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn, + mock_backend.dct, mock_backend.idct, + mock_backend.dctn, mock_backend.idctn, + mock_backend.dst, mock_backend.idst, + mock_backend.dstn, mock_backend.idstn, + mock_backend.fht, mock_backend.ifht) + + +@pytest.mark.parametrize("func, np_func, mock", zip(funcs, np_funcs, mocks)) +def test_backend_call(func, np_func, mock): + x = np.arange(20).reshape((10,2)) + answer = np_func(x.astype(np.float64)) + assert_allclose(func(x), answer, atol=1e-10) + + with set_backend(mock_backend, only=True): + mock.number_calls = 0 + y = func(x) + assert_equal(y, mock.return_value) + assert_equal(mock.number_calls, 1) + + assert_allclose(func(x), answer, atol=1e-10) + + +plan_funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn, + scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn, + scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn, + scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn, + scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn, + scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn) + +plan_mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn, + mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn, + mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn, + mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn, + mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn, + mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn) + + +@pytest.mark.parametrize("func, mock", zip(plan_funcs, plan_mocks)) +def test_backend_plan(func, mock): + x = np.arange(20).reshape((10, 2)) + + with pytest.raises(NotImplementedError, match='precomputed plan'): + func(x, plan='foo') + + with set_backend(mock_backend, only=True): + mock.number_calls = 0 + y = func(x, plan='foo') + assert_equal(y, mock.return_value) + 
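+ # _MockFunction records the call count and the last (args, kwargs), + # so the forwarding of the 'plan' keyword can be asserted below.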
assert_equal(mock.number_calls, 1) + assert_equal(mock.last_args[1]['plan'], 'foo') diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_basic.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1a12f1cd7768898aaa71d4fc7e3ba283770c77 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_basic.py @@ -0,0 +1,491 @@ +import queue +import threading +import multiprocessing +import numpy as np +import pytest +from numpy.random import random +from numpy.testing import assert_array_almost_equal, assert_allclose +from pytest import raises as assert_raises +import scipy.fft as fft +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import ( + array_namespace, size, xp_assert_close, xp_assert_equal +) + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")] +skip_xp_backends = pytest.mark.skip_xp_backends + + +# Expected input dtypes. Note that `scipy.fft` is more flexible for numpy, +# but for C2C transforms like `fft.fft`, the array API standard only mandates +# that complex dtypes should work, float32/float64 aren't guaranteed to. +def get_expected_input_dtype(func, xp): + if func in [fft.fft, fft.fftn, fft.fft2, + fft.ifft, fft.ifftn, fft.ifft2, + fft.hfft, fft.hfftn, fft.hfft2, + fft.irfft, fft.irfftn, fft.irfft2]: + dtype = xp.complex128 + elif func in [fft.rfft, fft.rfftn, fft.rfft2, + fft.ihfft, fft.ihfftn, fft.ihfft2]: + dtype = xp.float64 + else: + raise ValueError(f'Unknown FFT function: {func}') + + return dtype + + +def fft1(x): + L = len(x) + phase = -2j*np.pi*(np.arange(L)/float(L)) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + +class TestFFT: + + def test_identity(self, xp): + maxlen = 512 + x = xp.asarray(random(maxlen) + 1j*random(maxlen)) + xr = xp.asarray(random(maxlen)) + # Check some powers of 2 and some primes + for i in [1, 2, 16, 128, 512, 53, 149, 281, 397]: + xp_assert_close(fft.ifft(fft.fft(x[0:i])), x[0:i]) + xp_assert_close(fft.irfft(fft.rfft(xr[0:i]), i), xr[0:i]) + + @skip_xp_backends(np_only=True, reasons=['significant overhead for some backends']) + def test_identity_extensive(self, xp): + maxlen = 512 + x = xp.asarray(random(maxlen) + 1j*random(maxlen)) + xr = xp.asarray(random(maxlen)) + for i in range(1, maxlen): + xp_assert_close(fft.ifft(fft.fft(x[0:i])), x[0:i]) + xp_assert_close(fft.irfft(fft.rfft(xr[0:i]), i), xr[0:i]) + + def test_fft(self, xp): + x = random(30) + 1j*random(30) + expect = xp.asarray(fft1(x)) + x = xp.asarray(x) + xp_assert_close(fft.fft(x), expect) + xp_assert_close(fft.fft(x, norm="backward"), expect) + xp_assert_close(fft.fft(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30, dtype=xp.float64)),) + xp_assert_close(fft.fft(x, norm="forward"), expect / 30) + + @skip_xp_backends(np_only=True, reasons=['some backends allow `n=0`']) + def test_fft_n(self, xp): + x = xp.asarray([1, 2, 3], dtype=xp.complex128) + assert_raises(ValueError, fft.fft, x, 0) + + def test_ifft(self, xp): + x = xp.asarray(random(30) + 1j*random(30)) + xp_assert_close(fft.ifft(fft.fft(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.ifft(fft.fft(x, norm=norm), norm=norm), x) + + def test_fft2(self, xp): + x = xp.asarray(random((30, 20)) + 1j*random((30, 20))) + expect = fft.fft(fft.fft(x, axis=1), 
axis=0) + xp_assert_close(fft.fft2(x), expect) + xp_assert_close(fft.fft2(x, norm="backward"), expect) + xp_assert_close(fft.fft2(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))) + xp_assert_close(fft.fft2(x, norm="forward"), expect / (30 * 20)) + + def test_ifft2(self, xp): + x = xp.asarray(random((30, 20)) + 1j*random((30, 20))) + expect = fft.ifft(fft.ifft(x, axis=1), axis=0) + xp_assert_close(fft.ifft2(x), expect) + xp_assert_close(fft.ifft2(x, norm="backward"), expect) + xp_assert_close(fft.ifft2(x, norm="ortho"), + expect * xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))) + xp_assert_close(fft.ifft2(x, norm="forward"), expect * (30 * 20)) + + def test_fftn(self, xp): + x = xp.asarray(random((30, 20, 10)) + 1j*random((30, 20, 10))) + expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0) + xp_assert_close(fft.fftn(x), expect) + xp_assert_close(fft.fftn(x, norm="backward"), expect) + xp_assert_close(fft.fftn(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64))) + xp_assert_close(fft.fftn(x, norm="forward"), expect / (30 * 20 * 10)) + + def test_ifftn(self, xp): + x = xp.asarray(random((30, 20, 10)) + 1j*random((30, 20, 10))) + expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0) + xp_assert_close(fft.ifftn(x), expect, rtol=1e-7) + xp_assert_close(fft.ifftn(x, norm="backward"), expect, rtol=1e-7) + xp_assert_close( + fft.ifftn(x, norm="ortho"), + fft.ifftn(x) * xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64)) + ) + xp_assert_close(fft.ifftn(x, norm="forward"), + expect * (30 * 20 * 10), + rtol=1e-7) + + def test_rfft(self, xp): + x = xp.asarray(random(29), dtype=xp.float64) + for n in [size(x), 2*size(x)]: + for norm in [None, "backward", "ortho", "forward"]: + xp_assert_close(fft.rfft(x, n=n, norm=norm), + fft.fft(xp.asarray(x, dtype=xp.complex128), + n=n, norm=norm)[:(n//2 + 1)]) + xp_assert_close( + fft.rfft(x, n=n, norm="ortho"), + fft.rfft(x, n=n) / xp.sqrt(xp.asarray(n, dtype=xp.float64)) + ) + + def test_irfft(self, xp): + x = xp.asarray(random(30)) + xp_assert_close(fft.irfft(fft.rfft(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.irfft(fft.rfft(x, norm=norm), norm=norm), x) + + def test_rfft2(self, xp): + x = xp.asarray(random((30, 20)), dtype=xp.float64) + expect = fft.fft2(xp.asarray(x, dtype=xp.complex128))[:, :11] + xp_assert_close(fft.rfft2(x), expect) + xp_assert_close(fft.rfft2(x, norm="backward"), expect) + xp_assert_close(fft.rfft2(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))) + xp_assert_close(fft.rfft2(x, norm="forward"), expect / (30 * 20)) + + def test_irfft2(self, xp): + x = xp.asarray(random((30, 20))) + xp_assert_close(fft.irfft2(fft.rfft2(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.irfft2(fft.rfft2(x, norm=norm), norm=norm), x) + + def test_rfftn(self, xp): + x = xp.asarray(random((30, 20, 10)), dtype=xp.float64) + expect = fft.fftn(xp.asarray(x, dtype=xp.complex128))[:, :, :6] + xp_assert_close(fft.rfftn(x), expect) + xp_assert_close(fft.rfftn(x, norm="backward"), expect) + xp_assert_close(fft.rfftn(x, norm="ortho"), + expect / xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64))) + xp_assert_close(fft.rfftn(x, norm="forward"), expect / (30 * 20 * 10)) + + def test_irfftn(self, xp): + x = xp.asarray(random((30, 20, 10))) + xp_assert_close(fft.irfftn(fft.rfftn(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.irfftn(fft.rfftn(x, norm=norm), norm=norm), x) + + def 
test_hfft(self, xp): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + x = xp.asarray(x) + x_herm = xp.asarray(x_herm) + expect = xp.real(fft.fft(x)) + xp_assert_close(fft.hfft(x_herm), expect) + xp_assert_close(fft.hfft(x_herm, norm="backward"), expect) + xp_assert_close(fft.hfft(x_herm, norm="ortho"), + expect / xp.sqrt(xp.asarray(30, dtype=xp.float64))) + xp_assert_close(fft.hfft(x_herm, norm="forward"), expect / 30) + + def test_ihfft(self, xp): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + x = xp.asarray(x) + x_herm = xp.asarray(x_herm) + xp_assert_close(fft.ihfft(fft.hfft(x_herm)), x_herm) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm), x_herm) + + def test_hfft2(self, xp): + x = xp.asarray(random((30, 20))) + xp_assert_close(fft.hfft2(fft.ihfft2(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm), x) + + def test_ihfft2(self, xp): + x = xp.asarray(random((30, 20)), dtype=xp.float64) + expect = fft.ifft2(xp.asarray(x, dtype=xp.complex128))[:, :11] + xp_assert_close(fft.ihfft2(x), expect) + xp_assert_close(fft.ihfft2(x, norm="backward"), expect) + xp_assert_close( + fft.ihfft2(x, norm="ortho"), + expect * xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64)) + ) + xp_assert_close(fft.ihfft2(x, norm="forward"), expect * (30 * 20)) + + def test_hfftn(self, xp): + x = xp.asarray(random((30, 20, 10))) + xp_assert_close(fft.hfftn(fft.ihfftn(x)), x) + for norm in ["backward", "ortho", "forward"]: + xp_assert_close(fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm), x) + + def test_ihfftn(self, xp): + x = xp.asarray(random((30, 20, 10)), dtype=xp.float64) + expect = fft.ifftn(xp.asarray(x, dtype=xp.complex128))[:, :, :6] + xp_assert_close(expect, fft.ihfftn(x)) + xp_assert_close(expect, fft.ihfftn(x, norm="backward")) + xp_assert_close( + fft.ihfftn(x, norm="ortho"), + expect * xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64)) + ) + xp_assert_close(fft.ihfftn(x, norm="forward"), expect * (30 * 20 * 10)) + + def _check_axes(self, op, xp): + dtype = get_expected_input_dtype(op, xp) + x = xp.asarray(random((30, 20, 10)), dtype=dtype) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + xp_test = array_namespace(x) + for a in axes: + op_tr = op(xp_test.permute_dims(x, axes=a)) + tr_op = xp_test.permute_dims(op(x, axes=a), axes=a) + xp_assert_close(op_tr, tr_op) + + @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn, fft.rfftn, fft.irfftn]) + def test_axes_standard(self, op, xp): + self._check_axes(op, xp) + + @pytest.mark.parametrize("op", [fft.hfftn, fft.ihfftn]) + def test_axes_non_standard(self, op, xp): + self._check_axes(op, xp) + + @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn, + fft.rfftn, fft.irfftn]) + def test_axes_subset_with_shape_standard(self, op, xp): + dtype = get_expected_input_dtype(op, xp) + x = xp.asarray(random((16, 8, 4)), dtype=dtype) + axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)] + xp_test = array_namespace(x) + for a in axes: + # different shape on the first two axes + shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax] + for ax in range(x.ndim)]) + # transform only the first two axes + op_tr = op(xp_test.permute_dims(x, axes=a), + s=shape[:2], axes=(0, 1)) + tr_op = xp_test.permute_dims(op(x, s=shape[:2], axes=a[:2]), + axes=a) + 
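+            # Descriptive note (added): the two paths must coincide —
+            # transforming the permuted array over its leading axes has to
+            # agree with transforming the original array over the permuted
+            # axes and permuting the result afterwards.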
xp_assert_close(op_tr, tr_op) + + @pytest.mark.parametrize("op", [fft.fft2, fft.ifft2, + fft.rfft2, fft.irfft2, + fft.hfft2, fft.ihfft2, + fft.hfftn, fft.ihfftn]) + def test_axes_subset_with_shape_non_standard(self, op, xp): + dtype = get_expected_input_dtype(op, xp) + x = xp.asarray(random((16, 8, 4)), dtype=dtype) + axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)] + xp_test = array_namespace(x) + for a in axes: + # different shape on the first two axes + shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax] + for ax in range(x.ndim)]) + # transform only the first two axes + op_tr = op(xp_test.permute_dims(x, axes=a), s=shape[:2], axes=(0, 1)) + tr_op = xp_test.permute_dims(op(x, s=shape[:2], axes=a[:2]), axes=a) + xp_assert_close(op_tr, tr_op) + + def test_all_1d_norm_preserving(self, xp): + # verify that round-trip transforms are norm-preserving + x = xp.asarray(random(30), dtype=xp.float64) + xp_test = array_namespace(x) + x_norm = xp_test.linalg.vector_norm(x) + n = size(x) * 2 + func_pairs = [(fft.rfft, fft.irfft), + # hfft: order so the first function takes x.size samples + # (necessary for comparison to x_norm above) + (fft.ihfft, fft.hfft), + # functions that expect complex dtypes at the end + (fft.fft, fft.ifft), + ] + for forw, back in func_pairs: + if forw == fft.fft: + x = xp.asarray(x, dtype=xp.complex128) + x_norm = xp_test.linalg.vector_norm(x) + for n in [size(x), 2*size(x)]: + for norm in ['backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + xp_assert_close(xp_test.linalg.vector_norm(tmp), x_norm) + + @skip_xp_backends(np_only=True) + @pytest.mark.parametrize("dtype", [np.float16, np.longdouble]) + def test_dtypes_nonstandard(self, dtype): + x = random(30).astype(dtype) + out_dtypes = {np.float16: np.complex64, np.longdouble: np.clongdouble} + x_complex = x.astype(out_dtypes[dtype]) + + res_fft = fft.ifft(fft.fft(x)) + res_rfft = fft.irfft(fft.rfft(x)) + res_hfft = fft.hfft(fft.ihfft(x), x.shape[0]) + # Check both numerical results and exact dtype matches + assert_array_almost_equal(res_fft, x_complex) + assert_array_almost_equal(res_rfft, x) + assert_array_almost_equal(res_hfft, x) + assert res_fft.dtype == x_complex.dtype + assert res_rfft.dtype == np.result_type(np.float32, x.dtype) + assert res_hfft.dtype == np.result_type(np.float32, x.dtype) + + @pytest.mark.parametrize("dtype", ["float32", "float64"]) + def test_dtypes_real(self, dtype, xp): + x = xp.asarray(random(30), dtype=getattr(xp, dtype)) + + res_rfft = fft.irfft(fft.rfft(x)) + res_hfft = fft.hfft(fft.ihfft(x), x.shape[0]) + # Check both numerical results and exact dtype matches + xp_assert_close(res_rfft, x) + xp_assert_close(res_hfft, x) + + @pytest.mark.parametrize("dtype", ["complex64", "complex128"]) + def test_dtypes_complex(self, dtype, xp): + x = xp.asarray(random(30), dtype=getattr(xp, dtype)) + + res_fft = fft.ifft(fft.fft(x)) + # Check both numerical results and exact dtype matches + xp_assert_close(res_fft, x) + + @skip_xp_backends(np_only=True, + reasons=['array-likes only supported for NumPy backend']) + @pytest.mark.parametrize("op", [fft.fft, fft.ifft, + fft.fft2, fft.ifft2, + fft.fftn, fft.ifftn, + fft.rfft, fft.irfft, + fft.rfft2, fft.irfft2, + fft.rfftn, fft.irfftn, + fft.hfft, fft.ihfft, + fft.hfft2, fft.ihfft2, + fft.hfftn, fft.ihfftn,]) + def test_array_like(self, xp, op): + x = [[[1.0, 1.0], [1.0, 1.0]], + [[1.0, 1.0], [1.0, 1.0]], + [[1.0, 1.0], [1.0, 1.0]]] + xp_assert_close(op(x), op(xp.asarray(x))) + + +@skip_xp_backends(np_only=True) 
+@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [fft.fft, fft.fft2, fft.fftn, + fft.ifft, fft.ifft2, fft.ifftn]) +def test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_array_almost_equal(X_res, Y_res) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_array_almost_equal(X_res, Y_res) + else: + raise ValueError + + +@skip_xp_backends(cpu_only=True) +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args, xp=None): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + + # Make sure all threads returned the correct value + for i in range(self.threads): + xp_assert_equal( + q.get(timeout=5), expected, + err_msg='Function returned wrong value in multithreaded context' + ) + + def test_fft(self, xp): + a = xp.ones(self.input_shape, dtype=xp.complex128) + self._test_mtsame(fft.fft, a, xp=xp) + + def test_ifft(self, xp): + a = xp.full(self.input_shape, 1+0j) + self._test_mtsame(fft.ifft, a, xp=xp) + + def test_rfft(self, xp): + a = xp.ones(self.input_shape) + self._test_mtsame(fft.rfft, a, xp=xp) + + def test_irfft(self, xp): + a = xp.full(self.input_shape, 1+0j) + self._test_mtsame(fft.irfft, a, xp=xp) + + def test_hfft(self, xp): + a = xp.ones(self.input_shape, dtype=xp.complex64) + self._test_mtsame(fft.hfft, a, xp=xp) + + def test_ihfft(self, xp): + a = xp.ones(self.input_shape) + self._test_mtsame(fft.ihfft, a, xp=xp) + + +@skip_xp_backends(np_only=True) +@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft]) +def test_multiprocess(func): + # Test that fft still works after fork (gh-10422) + + with multiprocessing.Pool(2) as p: + res = p.map(func, [np.ones(100) for _ in range(4)]) + + expect = func(np.ones(100)) + for x in res: + assert_allclose(x, expect) + + +class TestIRFFTN: + + def test_not_last_axis_success(self, xp): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + a = xp.asarray(a) + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) + + +@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft, + fft.fftn, fft.ifftn, + fft.rfftn, fft.irfftn, fft.hfft, fft.ihfft]) +def test_non_standard_params(func, xp): + if func in [fft.rfft, fft.rfftn, fft.ihfft]: + dtype = xp.float64 + else: + dtype = xp.complex128 + + if xp.__name__ != 'numpy': + x = xp.asarray([1, 2, 3], dtype=dtype) + # func(x) should not raise an exception + func(x) + assert_raises(ValueError, func, x, workers=2) + # `plan` param is not tested since SciPy does not use it 
currently + # but should be tested if it comes into use diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py new file mode 100644 index 0000000000000000000000000000000000000000..e9efd852d1b7437021f80da24e97488bc9f786eb --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_fftlog.py @@ -0,0 +1,179 @@ +import warnings +import numpy as np +import pytest + +from scipy.fft._fftlog import fht, ifht, fhtoffset +from scipy.special import poch + +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends"),] +skip_xp_backends = pytest.mark.skip_xp_backends + + +def test_fht_agrees_with_fftlog(xp): + # check that fht numerically agrees with the output from Fortran FFTLog, + # the results were generated with the provided `fftlogtest` program, + # after fixing how the k array is generated (divide range by n-1, not n) + + # test function, analytical Hankel transform is of the same form + def f(r, mu): + return r**(mu+1)*np.exp(-r**2/2) + + r = np.logspace(-4, 4, 16) + + dln = np.log(r[1]/r[0]) + mu = 0.3 + offset = 0.0 + bias = 0.0 + + a = xp.asarray(f(r, mu)) + + # test 1: compute as given + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02, + -0.1949518286432330E-02, +0.3789220182554077E-02, + +0.5093959119952945E-03, +0.2785387803618774E-01, + +0.9944952700848897E-01, +0.4599202164586588E+00, + +0.3157462160881342E+00, -0.8201236844404755E-03, + -0.7834031308271878E-03, +0.3931444945110708E-03, + -0.2697710625194777E-03, +0.3568398050238820E-03, + -0.5554454827797206E-03, +0.8286331026468585E-03] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 2: change to optimal offset + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05, + +0.3150140927838524E-03, +0.9149121960963704E-03, + +0.5808089753959363E-02, +0.2548065256377240E-01, + +0.1339477692089897E+00, +0.4821530509479356E+00, + +0.2659899781579785E+00, -0.1116475278448113E-01, + +0.1791441617592385E-02, -0.4181810476548056E-03, + +0.1314963536765343E-03, -0.5422057743066297E-04, + +0.3208681804170443E-04, -0.2696849476008234E-04] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 3: positive bias + bias = 0.8 + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00, + +0.1065374386206564E+00, -0.5121739602708132E-01, + +0.2636649319269470E-01, +0.1697209218849693E-01, + +0.1250215614723183E+00, +0.4739583261486729E+00, + +0.2841149874912028E+00, -0.8312764741645729E-02, + +0.1024233505508988E-02, -0.1644902767389120E-03, + +0.3305775476926270E-04, -0.7786993194882709E-05, + +0.1962258449520547E-05, -0.8977895734909250E-06] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + # test 4: negative bias + bias = -0.8 + offset = fhtoffset(dln, mu, bias=bias) + ours = fht(a, dln, mu, offset=offset, bias=bias) + theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04, + +0.2123969254700955E-03, +0.1009558244834628E-02, + 
+0.5131386375222176E-02, +0.2461678673516286E-01, + +0.1235812845384476E+00, +0.4719570096404403E+00, + +0.2893487490631317E+00, -0.1686570611318716E-01, + +0.2231398155172505E-01, -0.1480742256379873E-01, + +0.1692387813500801E+00, +0.3097490354365797E+00, + +2.7593607182401860E+00, 10.5251075070045800E+00] + theirs = xp.asarray(theirs, dtype=xp.float64) + xp_assert_close(ours, theirs) + + +@pytest.mark.parametrize('optimal', [True, False]) +@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0]) +@pytest.mark.parametrize('bias', [0, 0.1, -0.1]) +@pytest.mark.parametrize('n', [64, 63]) +def test_fht_identity(n, bias, offset, optimal, xp): + rng = np.random.RandomState(3491349965) + + a = xp.asarray(rng.standard_normal(n)) + dln = rng.uniform(-1, 1) + mu = rng.uniform(-2, 2) + + if optimal: + offset = fhtoffset(dln, mu, initial=offset, bias=bias) + + A = fht(a, dln, mu, offset=offset, bias=bias) + a_ = ifht(A, dln, mu, offset=offset, bias=bias) + + xp_assert_close(a_, a, rtol=1.5e-7) + + +def test_fht_special_cases(xp): + rng = np.random.RandomState(3491349965) + + a = xp.asarray(rng.standard_normal(64)) + dln = rng.uniform(-1, 1) + + # let x = (mu+1+q)/2, y = (mu+1-q)/2, M = {0, -1, -2, ...} + + # case 1: x in M, y in M => well-defined transform + mu, bias = -4.0, 1.0 + with warnings.catch_warnings(record=True) as record: + fht(a, dln, mu, bias=bias) + assert not record, 'fht warned about a well-defined transform' + + # case 2: x not in M, y in M => well-defined transform + mu, bias = -2.5, 0.5 + with warnings.catch_warnings(record=True) as record: + fht(a, dln, mu, bias=bias) + assert not record, 'fht warned about a well-defined transform' + + # case 3: x in M, y not in M => singular transform + mu, bias = -3.5, 0.5 + with pytest.warns(Warning) as record: + fht(a, dln, mu, bias=bias) + assert record, 'fht did not warn about a singular transform' + + # case 4: x not in M, y in M => singular inverse transform + mu, bias = -2.5, 0.5 + with pytest.warns(Warning) as record: + ifht(a, dln, mu, bias=bias) + assert record, 'ifht did not warn about a singular transform' + + +@pytest.mark.parametrize('n', [64, 63]) +def test_fht_exact(n, xp): + rng = np.random.RandomState(3491349965) + + # for a(r) a power law r^\gamma, the fast Hankel transform produces the + # exact continuous Hankel transform if biased with q = \gamma + + mu = rng.uniform(0, 3) + + # convergence of HT: -1-mu < gamma < 1/2 + gamma = rng.uniform(-1-mu, 1/2) + + r = np.logspace(-2, 2, n) + a = xp.asarray(r**gamma) + + dln = np.log(r[1]/r[0]) + + offset = fhtoffset(dln, mu, initial=0.0, bias=gamma) + + A = fht(a, dln, mu, offset=offset, bias=gamma) + + k = np.exp(offset)/r[::-1] + + # analytical result + At = xp.asarray((2/k)**gamma * poch((mu+1-gamma)/2, gamma)) + + xp_assert_close(A, At) + +@skip_xp_backends(np_only=True, + reasons=['array-likes only supported for NumPy backend']) +@pytest.mark.parametrize("op", [fht, ifht]) +def test_array_like(xp, op): + x = [[[1.0, 1.0], [1.0, 1.0]], + [[1.0, 1.0], [1.0, 1.0]], + [[1.0, 1.0], [1.0, 1.0]]] + xp_assert_close(op(x, 1.0, 2.0), op(xp.asarray(x), 1.0, 2.0)) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..9a102ddec9011710f5c0f763334ef9492dc767da --- /dev/null +++ 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_helper.py @@ -0,0 +1,570 @@ +"""Includes test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 +Modified for Array API, 2023 + +""" +from scipy.fft._helper import next_fast_len, prev_fast_len, _init_nd_shape_and_axes +from numpy.testing import assert_equal +from pytest import raises as assert_raises +import pytest +import numpy as np +import sys +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import ( + xp_assert_close, get_xp_devices, device, array_namespace +) +from scipy import fft + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")] +skip_xp_backends = pytest.mark.skip_xp_backends + +_5_smooth_numbers = [ + 2, 3, 4, 5, 6, 8, 9, 10, + 2 * 3 * 5, + 2**3 * 3**5, + 2**3 * 3**3 * 5**2, +] + +def test_next_fast_len(): + for n in _5_smooth_numbers: + assert_equal(next_fast_len(n), n) + + +def _assert_n_smooth(x, n): + x_orig = x + if n < 2: + assert False + + while True: + q, r = divmod(x, 2) + if r != 0: + break + x = q + + for d in range(3, n+1, 2): + while True: + q, r = divmod(x, d) + if r != 0: + break + x = q + + assert x == 1, \ + f'x={x_orig} is not {n}-smooth, remainder={x}' + + +@skip_xp_backends(np_only=True) +class TestNextFastLen: + + def test_next_fast_len(self): + np.random.seed(1234) + + def nums(): + yield from range(1, 1000) + yield 2**5 * 3**5 * 4**5 + 1 + + for n in nums(): + m = next_fast_len(n) + _assert_n_smooth(m, 11) + assert m == next_fast_len(n, False) + + m = next_fast_len(n, True) + _assert_n_smooth(m, 5) + + def test_np_integers(self): + ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64] + for ityp in ITYPES: + x = ityp(12345) + testN = next_fast_len(x) + assert_equal(testN, next_fast_len(int(x))) + + def testnext_fast_len_small(self): + hams = { + 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15, + 16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000 + } + for x, y in hams.items(): + assert_equal(next_fast_len(x, True), y) + + @pytest.mark.xfail(sys.maxsize < 2**32, + reason="Hamming Numbers too large for 32-bit", + raises=ValueError, strict=True) + def testnext_fast_len_big(self): + hams = { + 510183360: 510183360, 510183360 + 1: 512000000, + 511000000: 512000000, + 854296875: 854296875, 854296875 + 1: 859963392, + 196608000000: 196608000000, 196608000000 + 1: 196830000000, + 8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208, + 206391214080000: 206391214080000, + 206391214080000 + 1: 206624260800000, + 470184984576000: 470184984576000, + 470184984576000 + 1: 470715894135000, + 7222041363087360: 7222041363087360, + 7222041363087360 + 1: 7230196133913600, + # power of 5 5**23 + 11920928955078125: 11920928955078125, + 11920928955078125 - 1: 11920928955078125, + # power of 3 3**34 + 16677181699666569: 16677181699666569, + 16677181699666569 - 1: 16677181699666569, + # power of 2 2**54 + 18014398509481984: 18014398509481984, + 18014398509481984 - 1: 18014398509481984, + # above this, int(ceil(n)) == int(ceil(n+1)) + 19200000000000000: 19200000000000000, + 19200000000000000 + 1: 19221679687500000, + 288230376151711744: 288230376151711744, + 288230376151711744 + 1: 288325195312500000, + 288325195312500000 - 1: 288325195312500000, + 288325195312500000: 288325195312500000, + 288325195312500000 + 1: 288555831593533440, + } + for x, y in hams.items(): + assert_equal(next_fast_len(x, True), y) + + def 
test_keyword_args(self): + assert next_fast_len(11, real=True) == 12 + assert next_fast_len(target=7, real=False) == 7 + +@skip_xp_backends(np_only=True) +class TestPrevFastLen: + + def test_prev_fast_len(self): + np.random.seed(1234) + + def nums(): + yield from range(1, 1000) + yield 2**5 * 3**5 * 4**5 + 1 + + for n in nums(): + m = prev_fast_len(n) + _assert_n_smooth(m, 11) + assert m == prev_fast_len(n, False) + + m = prev_fast_len(n, True) + _assert_n_smooth(m, 5) + + def test_np_integers(self): + ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, + np.uint64] + for ityp in ITYPES: + x = ityp(12345) + testN = prev_fast_len(x) + assert_equal(testN, prev_fast_len(int(x))) + + testN = prev_fast_len(x, real=True) + assert_equal(testN, prev_fast_len(int(x), real=True)) + + def testprev_fast_len_small(self): + hams = { + 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 6, 8: 8, 14: 12, 15: 15, + 16: 16, 17: 16, 1021: 1000, 1536: 1536, 51200000: 51200000 + } + for x, y in hams.items(): + assert_equal(prev_fast_len(x, True), y) + + hams = { + 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, + 11: 11, 12: 12, 13: 12, 14: 14, 15: 15, 16: 16, 17: 16, 18: 18, + 19: 18, 20: 20, 21: 21, 22: 22, 120: 120, 121: 121, 122: 121, + 1021: 1008, 1536: 1536, 51200000: 51200000 + } + for x, y in hams.items(): + assert_equal(prev_fast_len(x, False), y) + + @pytest.mark.xfail(sys.maxsize < 2**32, + reason="Hamming Numbers too large for 32-bit", + raises=ValueError, strict=True) + def testprev_fast_len_big(self): + hams = { + # 2**6 * 3**13 * 5**1 + 510183360: 510183360, + 510183360 + 1: 510183360, + 510183360 - 1: 509607936, # 2**21 * 3**5 + # 2**6 * 5**6 * 7**1 * 73**1 + 511000000: 510183360, + 511000000 + 1: 510183360, + 511000000 - 1: 510183360, # 2**6 * 3**13 * 5**1 + # 3**7 * 5**8 + 854296875: 854296875, + 854296875 + 1: 854296875, + 854296875 - 1: 850305600, # 2**6 * 3**12 * 5**2 + # 2**22 * 3**1 * 5**6 + 196608000000: 196608000000, + 196608000000 + 1: 196608000000, + 196608000000 - 1: 195910410240, # 2**13 * 3**14 * 5**1 + # 2**5 * 3**2 * 5**15 + 8789062500000: 8789062500000, + 8789062500000 + 1: 8789062500000, + 8789062500000 - 1: 8748000000000, # 2**11 * 3**7 * 5**9 + # 2**24 * 3**9 * 5**4 + 206391214080000: 206391214080000, + 206391214080000 + 1: 206391214080000, + 206391214080000 - 1: 206158430208000, # 2**39 * 3**1 * 5**3 + # 2**18 * 3**15 * 5**3 + 470184984576000: 470184984576000, + 470184984576000 + 1: 470184984576000, + 470184984576000 - 1: 469654673817600, # 2**33 * 3**7 **5**2 + # 2**25 * 3**16 * 5**1 + 7222041363087360: 7222041363087360, + 7222041363087360 + 1: 7222041363087360, + 7222041363087360 - 1: 7213895789838336, # 2**40 * 3**8 + # power of 5 5**23 + 11920928955078125: 11920928955078125, + 11920928955078125 + 1: 11920928955078125, + 11920928955078125 - 1: 11901557422080000, # 2**14 * 3**19 * 5**4 + # power of 3 3**34 + 16677181699666569: 16677181699666569, + 16677181699666569 + 1: 16677181699666569, + 16677181699666569 - 1: 16607531250000000, # 2**7 * 3**12 * 5**12 + # power of 2 2**54 + 18014398509481984: 18014398509481984, + 18014398509481984 + 1: 18014398509481984, + 18014398509481984 - 1: 18000000000000000, # 2**16 * 3**2 * 5**15 + # 2**20 * 3**1 * 5**14 + 19200000000000000: 19200000000000000, + 19200000000000000 + 1: 19200000000000000, + 19200000000000000 - 1: 19131876000000000, # 2**11 * 3**14 * 5**9 + # 2**58 + 288230376151711744: 288230376151711744, + 288230376151711744 + 1: 288230376151711744, + 288230376151711744 - 1: 288000000000000000, # 2**20 * 3**2 * 5**15 + 
# 2**5 * 3**10 * 5**16 + 288325195312500000: 288325195312500000, + 288325195312500000 + 1: 288325195312500000, + 288325195312500000 - 1: 288230376151711744, # 2**58 + } + for x, y in hams.items(): + assert_equal(prev_fast_len(x, True), y) + + def test_keyword_args(self): + assert prev_fast_len(11, real=True) == 10 + assert prev_fast_len(target=7, real=False) == 7 + + +@skip_xp_backends(cpu_only=True) +class Test_init_nd_shape_and_axes: + + def test_py_0d_defaults(self, xp): + x = xp.asarray(4) + shape = None + axes = None + + shape_expected = () + axes_expected = [] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_0d_defaults(self, xp): + x = xp.asarray(7.) + shape = None + axes = None + + shape_expected = () + axes_expected = [] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_py_1d_defaults(self, xp): + x = xp.asarray([1, 2, 3]) + shape = None + axes = None + + shape_expected = (3,) + axes_expected = [0] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_1d_defaults(self, xp): + x = xp.arange(0, 1, .1) + shape = None + axes = None + + shape_expected = (10,) + axes_expected = [0] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_py_2d_defaults(self, xp): + x = xp.asarray([[1, 2, 3, 4], + [5, 6, 7, 8]]) + shape = None + axes = None + + shape_expected = (2, 4) + axes_expected = [0, 1] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_2d_defaults(self, xp): + x = xp.arange(0, 1, .1) + x = xp.reshape(x, (5, 2)) + shape = None + axes = None + + shape_expected = (5, 2) + axes_expected = [0, 1] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_defaults(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = None + axes = None + + shape_expected = (6, 2, 5, 3, 4) + axes_expected = [0, 1, 2, 3, 4] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_shape(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, -1, 1, 4] + axes = None + + shape_expected = (10, 2, 5, 1, 4) + axes_expected = [0, 1, 2, 3, 4] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_axes(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = None + axes = [4, 1, 2] + + shape_expected = (4, 2, 5) + axes_expected = [4, 1, 2] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_xp_5d_set_shape_axes(self, xp): + x = xp.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, 2] + axes = [1, 0, 3] + + shape_expected = (10, 6, 2) + axes_expected = [1, 0, 3] + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert shape_res == shape_expected + assert axes_res == axes_expected + + def test_shape_axes_subset(self, xp): + x = xp.zeros((2, 3, 4, 5)) + shape, axes = _init_nd_shape_and_axes(x, 
shape=(5, 5, 5), axes=None) + + assert shape == (5, 5, 5) + assert axes == [1, 2, 3] + + def test_errors(self, xp): + x = xp.zeros(1) + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes(x, shape=None, axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes(x, shape=None, axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + _init_nd_shape_and_axes(x, shape=None, axes=[0, 0]) + + with assert_raises(ValueError, match="shape must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None) + + with assert_raises(ValueError, match="shape must be a scalar or " + "iterable of integers"): + _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + _init_nd_shape_and_axes(xp.zeros([1, 1, 1, 1]), + shape=[1, 2, 3], axes=[1]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[0\]\) specified"): + _init_nd_shape_and_axes(x, shape=[0], axes=None) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[-2\]\) specified"): + _init_nd_shape_and_axes(x, shape=-2, axes=None) + + +class TestFFTShift: + + def test_definition(self, xp): + x = xp.asarray([0., 1, 2, 3, 4, -4, -3, -2, -1]) + y = xp.asarray([-4., -3, -2, -1, 0, 1, 2, 3, 4]) + xp_assert_close(fft.fftshift(x), y) + xp_assert_close(fft.ifftshift(y), x) + x = xp.asarray([0., 1, 2, 3, 4, -5, -4, -3, -2, -1]) + y = xp.asarray([-5., -4, -3, -2, -1, 0, 1, 2, 3, 4]) + xp_assert_close(fft.fftshift(x), y) + xp_assert_close(fft.ifftshift(y), x) + + def test_inverse(self, xp): + for n in [1, 4, 9, 100, 211]: + x = xp.asarray(np.random.random((n,))) + xp_assert_close(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self, xp): + freqs = xp.asarray([[0., 1, 2], [3, 4, -4], [-3, -2, -1]]) + shifted = xp.asarray([[-1., -3, -2], [2, 0, 1], [-4, 3, 4]]) + xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shifted) + xp_assert_close(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,))) + xp_assert_close(fft.ifftshift(shifted, axes=(0, 1)), freqs) + xp_assert_close(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + xp_assert_close(fft.fftshift(freqs), shifted) + xp_assert_close(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self, xp): + """ Test 2D input, which has uneven dimension sizes """ + freqs = xp.asarray([ + [0, 1], + [2, 3], + [4, 5] + ], dtype=xp.float64) + + # shift in dimension 0 + shift_dim0 = xp.asarray([ + [4, 5], + [0, 1], + [2, 3] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=0), shift_dim0) + xp_assert_close(fft.ifftshift(shift_dim0, axes=0), freqs) + xp_assert_close(fft.fftshift(freqs, axes=(0,)), shift_dim0) + xp_assert_close(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = xp.asarray([ + [1, 0], + [3, 2], + [5, 4] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=1), shift_dim1) + xp_assert_close(fft.ifftshift(shift_dim1, axes=1), freqs) + 
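+        # Note (added): for an axis of odd length n, fftshift rolls by
+        # n // 2 and so is not self-inverse; e.g. fftshift([0, 1, 2]) gives
+        # [2, 0, 1], and only ifftshift restores the original ordering.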
+ # shift in both dimensions + shift_dim_both = xp.asarray([ + [5, 4], + [1, 0], + [3, 2] + ], dtype=xp.float64) + xp_assert_close(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + xp_assert_close(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + xp_assert_close(fft.fftshift(freqs, axes=None), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both, axes=None), freqs) + xp_assert_close(fft.fftshift(freqs), shift_dim_both) + xp_assert_close(fft.ifftshift(shift_dim_both), freqs) + + +@skip_xp_backends("cupy", "jax.numpy", + reasons=["CuPy has not implemented the `device` param", + "JAX has not implemented the `device` param"]) +class TestFFTFreq: + + def test_definition(self, xp): + x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], dtype=xp.float64) + x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=xp.float64) + + # default dtype varies across backends + + y = 9 * fft.fftfreq(9, xp=xp) + xp_assert_close(y, x, check_dtype=False, check_namespace=True) + + y = 9 * xp.pi * fft.fftfreq(9, xp.pi, xp=xp) + xp_assert_close(y, x, check_dtype=False) + + y = 10 * fft.fftfreq(10, xp=xp) + xp_assert_close(y, x2, check_dtype=False) + + y = 10 * xp.pi * fft.fftfreq(10, xp.pi, xp=xp) + xp_assert_close(y, x2, check_dtype=False) + + def test_device(self, xp): + xp_test = array_namespace(xp.empty(0)) + devices = get_xp_devices(xp) + for d in devices: + y = fft.fftfreq(9, xp=xp, device=d) + x = xp_test.empty(0, device=d) + assert device(y) == device(x) + + +@skip_xp_backends("cupy", "jax.numpy", + reasons=["CuPy has not implemented the `device` param", + "JAX has not implemented the `device` param"]) +class TestRFFTFreq: + + def test_definition(self, xp): + x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64) + x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64) + + # default dtype varies across backends + + y = 9 * fft.rfftfreq(9, xp=xp) + xp_assert_close(y, x, check_dtype=False, check_namespace=True) + + y = 9 * xp.pi * fft.rfftfreq(9, xp.pi, xp=xp) + xp_assert_close(y, x, check_dtype=False) + + y = 10 * fft.rfftfreq(10, xp=xp) + xp_assert_close(y, x2, check_dtype=False) + + y = 10 * xp.pi * fft.rfftfreq(10, xp.pi, xp=xp) + xp_assert_close(y, x2, check_dtype=False) + + def test_device(self, xp): + xp_test = array_namespace(xp.empty(0)) + devices = get_xp_devices(xp) + for d in devices: + y = fft.rfftfreq(9, xp=xp, device=d) + x = xp_test.empty(0, device=d) + assert device(y) == device(x) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py new file mode 100644 index 0000000000000000000000000000000000000000..1a6b71b830211f8bcbe56e97ff71098be75021c8 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_multithreading.py @@ -0,0 +1,84 @@ +from scipy import fft +import numpy as np +import pytest +from numpy.testing import assert_allclose +import multiprocessing +import os + + +@pytest.fixture(scope='module') +def x(): + return np.random.randn(512, 128) # Must be large enough to qualify for mt + + +@pytest.mark.parametrize("func", [ + fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn, + fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, 
fft.irfftn, + fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn, + fft.dct, fft.idct, fft.dctn, fft.idctn, + fft.dst, fft.idst, fft.dstn, fft.idstn, +]) +@pytest.mark.parametrize("workers", [2, -1]) +def test_threaded_same(x, func, workers): + expected = func(x, workers=1) + actual = func(x, workers=workers) + assert_allclose(actual, expected) + + +def _mt_fft(x): + return fft.fft(x, workers=2) + + +@pytest.mark.slow +def test_mixed_threads_processes(x): + # Test that the fft threadpool is safe to use before & after fork + + expect = fft.fft(x, workers=2) + + with multiprocessing.Pool(2) as p: + res = p.map(_mt_fft, [x for _ in range(4)]) + + for r in res: + assert_allclose(r, expect) + + fft.fft(x, workers=2) + + +def test_invalid_workers(x): + cpus = os.cpu_count() + + fft.ifft([1], workers=-cpus) + + with pytest.raises(ValueError, match='workers must not be zero'): + fft.fft(x, workers=0) + + with pytest.raises(ValueError, match='workers value out of range'): + fft.ifft(x, workers=-cpus-1) + + +def test_set_get_workers(): + cpus = os.cpu_count() + assert fft.get_workers() == 1 + with fft.set_workers(4): + assert fft.get_workers() == 4 + + with fft.set_workers(-1): + assert fft.get_workers() == cpus + + assert fft.get_workers() == 4 + + assert fft.get_workers() == 1 + + with fft.set_workers(-cpus): + assert fft.get_workers() == 1 + + +def test_set_workers_invalid(): + + with pytest.raises(ValueError, match='workers must not be zero'): + with fft.set_workers(0): + pass + + with pytest.raises(ValueError, match='workers value out of range'): + with fft.set_workers(-os.cpu_count()-1): + pass diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_real_transforms.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..26e4589bdb6dc23fb62765aa3cc7e52e06684268 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/fft/tests/test_real_transforms.py @@ -0,0 +1,249 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import pytest +import math + +from scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn +import scipy.fft as fft +from scipy import fftpack +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import copy, xp_assert_close + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")] +skip_xp_backends = pytest.mark.skip_xp_backends + +SQRT_2 = math.sqrt(2) + +# scipy.fft wraps the fftpack versions but with normalized inverse transforms. 
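+# (For example, ``fft.dct(x, norm="ortho")`` matches
+# ``fftpack.dct(x, norm="ortho")``, while ``fft.idct(fft.dct(x))`` already
+# round-trips to ``x`` without any manual rescaling.)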
+# So, the forward transforms and definitions are already thoroughly tested in +# fftpack/test_real_transforms.py + + +@skip_xp_backends(cpu_only=True) +@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("n", [2, 3, 4, 5, 10, 16]) +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("orthogonalize", [False, True]) +def test_identity_1d(forward, backward, type, n, axis, norm, orthogonalize, xp): + # Test the identity f^-1(f(x)) == x + x = xp.asarray(np.random.rand(n, n)) + + y = forward(x, type, axis=axis, norm=norm, orthogonalize=orthogonalize) + z = backward(y, type, axis=axis, norm=norm, orthogonalize=orthogonalize) + xp_assert_close(z, x) + + pad = [(0, 0)] * 2 + pad[axis] = (0, 4) + + y2 = xp.asarray(np.pad(np.asarray(y), pad, mode='edge')) + z2 = backward(y2, type, n, axis, norm, orthogonalize=orthogonalize) + xp_assert_close(z2, x) + + +@skip_xp_backends(np_only=True, + reasons=['`overwrite_x` only supported for NumPy backend.']) +@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64, + np.complex64, np.complex128]) +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("overwrite_x", [True, False]) +def test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm, + overwrite_x): + # Test the identity f^-1(f(x)) == x + x = np.random.rand(7, 8).astype(dtype) + x_orig = x.copy() + + y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x) + y_orig = y.copy() + z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x) + if not overwrite_x: + assert_allclose(z, x, rtol=1e-6, atol=1e-6) + assert_array_equal(x, x_orig) + assert_array_equal(y, y_orig) + else: + assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6) + + +@skip_xp_backends(cpu_only=True) +@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("shape, axes", + [ + ((4, 4), 0), + ((4, 4), 1), + ((4, 4), None), + ((4, 4), (0, 1)), + ((10, 12), None), + ((10, 12), (0, 1)), + ((4, 5, 6), None), + ((4, 5, 6), 1), + ((4, 5, 6), (0, 2)), + ]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("orthogonalize", [False, True]) +def test_identity_nd(forward, backward, type, shape, axes, norm, + orthogonalize, xp): + # Test the identity f^-1(f(x)) == x + + x = xp.asarray(np.random.random(shape)) + + if axes is not None: + shape = np.take(shape, axes) + + y = forward(x, type, axes=axes, norm=norm, orthogonalize=orthogonalize) + z = backward(y, type, axes=axes, norm=norm, orthogonalize=orthogonalize) + xp_assert_close(z, x) + + if axes is None: + pad = [(0, 4)] * x.ndim + elif isinstance(axes, int): + pad = [(0, 0)] * x.ndim + pad[axes] = (0, 4) + else: + pad = [(0, 0)] * x.ndim + + for a in axes: + pad[a] = (0, 4) + + # TODO write an array-agnostic pad + y2 = xp.asarray(np.pad(np.asarray(y), pad, mode='edge')) + z2 = backward(y2, type, shape, axes, norm, orthogonalize=orthogonalize) + xp_assert_close(z2, x) + + +@skip_xp_backends(np_only=True, + reasons=['`overwrite_x` only supported for NumPy backend.']) +@pytest.mark.parametrize("forward, backward", [(dctn, idctn), 
(dstn, idstn)]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("shape, axes", + [ + ((4, 5), 0), + ((4, 5), 1), + ((4, 5), None), + ]) +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64, + np.complex64, np.complex128]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +@pytest.mark.parametrize("overwrite_x", [False, True]) +def test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype, + norm, overwrite_x): + # Test the identity f^-1(f(x)) == x + + x = np.random.random(shape).astype(dtype) + x_orig = x.copy() + + if axes is not None: + shape = np.take(shape, axes) + + y = forward(x, type, axes=axes, norm=norm) + y_orig = y.copy() + z = backward(y, type, axes=axes, norm=norm) + if overwrite_x: + assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6) + else: + assert_allclose(z, x, rtol=1e-6, atol=1e-6) + assert_array_equal(x, x_orig) + assert_array_equal(y, y_orig) + + +@skip_xp_backends(cpu_only=True) +@pytest.mark.parametrize("func", ['dct', 'dst', 'dctn', 'dstn']) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward']) +def test_fftpack_equivalience(func, type, norm, xp): + x = np.random.rand(8, 16) + fftpack_res = xp.asarray(getattr(fftpack, func)(x, type, norm=norm)) + x = xp.asarray(x) + fft_res = getattr(fft, func)(x, type, norm=norm) + + xp_assert_close(fft_res, fftpack_res) + + +@skip_xp_backends(cpu_only=True) +@pytest.mark.parametrize("func", [dct, dst, dctn, dstn]) +@pytest.mark.parametrize("type", [1, 2, 3, 4]) +def test_orthogonalize_default(func, type, xp): + # Test orthogonalize is the default when norm="ortho", but not otherwise + x = xp.asarray(np.random.rand(100)) + + for norm, ortho in [ + ("forward", False), + ("backward", False), + ("ortho", True), + ]: + a = func(x, type=type, norm=norm, orthogonalize=ortho) + b = func(x, type=type, norm=norm) + xp_assert_close(a, b) + + +@skip_xp_backends(cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +@pytest.mark.parametrize("func, type", [ + (dct, 4), (dst, 1), (dst, 4)]) +def test_orthogonalize_noop(func, type, norm, xp): + # Transforms where orthogonalize is a no-op + x = xp.asarray(np.random.rand(100)) + y1 = func(x, type=type, norm=norm, orthogonalize=True) + y2 = func(x, type=type, norm=norm, orthogonalize=False) + xp_assert_close(y1, y2) + + +@skip_xp_backends('jax.numpy', + reasons=['jax arrays do not support item assignment'], + cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +def test_orthogonalize_dct1(norm, xp): + x = xp.asarray(np.random.rand(100)) + + x2 = copy(x, xp=xp) + x2[0] *= SQRT_2 + x2[-1] *= SQRT_2 + + y1 = dct(x, type=1, norm=norm, orthogonalize=True) + y2 = dct(x2, type=1, norm=norm, orthogonalize=False) + + y2[0] /= SQRT_2 + y2[-1] /= SQRT_2 + xp_assert_close(y1, y2) + + +@skip_xp_backends('jax.numpy', + reasons=['jax arrays do not support item assignment'], + cpu_only=True) +@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"]) +@pytest.mark.parametrize("func", [dct, dst]) +def test_orthogonalize_dcst2(func, norm, xp): + x = xp.asarray(np.random.rand(100)) + y1 = func(x, type=2, norm=norm, orthogonalize=True) + y2 = func(x, type=2, norm=norm, orthogonalize=False) + + y2[0 if func == dct else -1] /= SQRT_2 + xp_assert_close(y1, y2) + + +@skip_xp_backends('jax.numpy', + reasons=['jax arrays do not support item assignment'], + cpu_only=True) +@pytest.mark.parametrize("norm", 
["backward", "ortho", "forward"]) +@pytest.mark.parametrize("func", [dct, dst]) +def test_orthogonalize_dcst3(func, norm, xp): + x = xp.asarray(np.random.rand(100)) + x2 = copy(x, xp=xp) + x2[0 if func == dct else -1] *= SQRT_2 + + y1 = func(x, type=3, norm=norm, orthogonalize=True) + y2 = func(x2, type=3, norm=norm, orthogonalize=False) + xp_assert_close(y1, y2) + +@skip_xp_backends(np_only=True, + reasons=['array-likes only supported for NumPy backend']) +@pytest.mark.parametrize("func", [dct, idct, dctn, idctn, dst, idst, dstn, idstn]) +def test_array_like(xp, func): + x = [[[1.0, 1.0], [1.0, 1.0]], + [[1.0, 1.0], [1.0, 1.0]], + [[1.0, 1.0], [1.0, 1.0]]] + xp_assert_close(func(x), func(xp.asarray(x))) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..039234777d35a1aebce2b93b943261e4f013a6b5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__init__.py @@ -0,0 +1,110 @@ +""" +============================================= +Integration and ODEs (:mod:`scipy.integrate`) +============================================= + +.. currentmodule:: scipy.integrate + +Integrating functions, given function object +============================================ + +.. autosummary:: + :toctree: generated/ + + quad -- General purpose integration + quad_vec -- General purpose integration of vector-valued functions + dblquad -- General purpose double integration + tplquad -- General purpose triple integration + nquad -- General purpose N-D integration + fixed_quad -- Integrate func(x) using Gaussian quadrature of order n + quadrature -- Integrate with given tolerance using Gaussian quadrature + romberg -- Integrate func using Romberg integration + newton_cotes -- Weights and error coefficient for Newton-Cotes integration + qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature + IntegrationWarning -- Warning on issues during integration + AccuracyWarning -- Warning on issues during quadrature integration + +Integrating functions, given fixed samples +========================================== + +.. autosummary:: + :toctree: generated/ + + trapezoid -- Use trapezoidal rule to compute integral. + cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral. + simpson -- Use Simpson's rule to compute integral from samples. + cumulative_simpson -- Use Simpson's rule to cumulatively compute integral from samples. + romb -- Use Romberg Integration to compute integral from + -- (2**k + 1) evenly-spaced samples. + +.. seealso:: + + :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian + quadrature roots and weights for other weighting factors and regions. + +Solving initial value problems for ODE systems +============================================== + +The solvers are implemented as individual classes, which can be used directly +(low-level usage) or through a convenience function. + +.. autosummary:: + :toctree: generated/ + + solve_ivp -- Convenient function for ODE integration. + RK23 -- Explicit Runge-Kutta solver of order 3(2). + RK45 -- Explicit Runge-Kutta solver of order 5(4). + DOP853 -- Explicit Runge-Kutta solver of order 8. + Radau -- Implicit Runge-Kutta solver of order 5. + BDF -- Implicit multi-step variable order (1 to 5) solver. 
+ LSODA -- LSODA solver from ODEPACK Fortran package. + OdeSolver -- Base class for ODE solvers. + DenseOutput -- Local interpolant for computing a dense output. + OdeSolution -- Class which represents a continuous ODE solution. + + +Old API +------- + +These are the routines developed earlier for SciPy. They wrap older solvers +implemented in Fortran (mostly ODEPACK). While the interface to them is not +particularly convenient and certain features are missing compared to the new +API, the solvers themselves are of good quality and work fast as compiled +Fortran code. In some cases, it might be worth using this old API. + +.. autosummary:: + :toctree: generated/ + + odeint -- General integration of ordinary differential equations. + ode -- Integrate ODE using VODE and ZVODE routines. + complex_ode -- Convert a complex-valued ODE to real-valued and integrate. + ODEintWarning -- Warning raised during the execution of `odeint`. + + +Solving boundary value problems for ODE systems +=============================================== + +.. autosummary:: + :toctree: generated/ + + solve_bvp -- Solve a boundary value problem for a system of ODEs. +""" # noqa: E501 + + +from ._quadrature import * +from ._odepack_py import * +from ._quadpack_py import * +from ._ode import * +from ._bvp import solve_bvp +from ._ivp import (solve_ivp, OdeSolution, DenseOutput, + OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA) +from ._quad_vec import quad_vec + +# Deprecated namespaces, to be removed in v2.0.0 +from . import dop, lsoda, vode, odepack, quadpack + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4fd581bd31ced39c25dbf085fdd9a9df6c73556 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f61dc258e6d35295d052e59e7d7bce97a50843e9 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3f6dc413ff192564f96b6eecb1519917858b832 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fc926b28809ffe0725cf487cc8a861c4781c97e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8fbb3e46122f61990802dbe8f6960877827a9c Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2685f3d3a664e0c653d0d2fc75f12bf7786b5d02 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60008511c35af32d6adb134a86cb10773fab31f4 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c422a5c4dcb0c5ca76f02df36eb2f49cf84bfa1 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7123dd79caaff661c3af4b112292ae93aab07fcd Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6678927c66d3c28047c54ae96981363ad43aeb15 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..042a25a2c84b950324ce430db73f03ba3b419de8 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b51d532e9d9a3b4883f90073e2c5043358332716 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4194af45c829d8df35acc36fe9ece00bc7dbf0bc Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_bvp.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_bvp.py new file mode 100644 index 0000000000000000000000000000000000000000..f988fdd6e0527d3adc4f4edfa955cc33d9eb85f8 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_bvp.py @@ -0,0 +1,1155 @@ +"""Boundary value problem solver.""" +from warnings import warn + +import numpy as np +from numpy.linalg import pinv + +from scipy.sparse import coo_matrix, csc_matrix +from scipy.sparse.linalg import splu +from scipy.optimize import OptimizeResult + + +EPS = np.finfo(float).eps + + +def estimate_fun_jac(fun, x, y, p, f0=None): + """Estimate derivatives of an ODE system rhs with forward differences. + + Returns + ------- + df_dy : ndarray, shape (n, n, m) + Derivatives with respect to y. An element (i, j, q) corresponds to + d f_i(x_q, y_q) / d (y_q)_j. + df_dp : ndarray with shape (n, k, m) or None + Derivatives with respect to p. An element (i, j, q) corresponds to + d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned. 
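+
+    Examples
+    --------
+    A minimal illustrative sketch (added for exposition): for the linear
+    right-hand side ``f(x, y) = y``, the estimated ``df_dy`` is the identity
+    at every mesh node, and ``df_dp`` is None because ``p`` is empty.
+
+    >>> import numpy as np
+    >>> from scipy.integrate._bvp import estimate_fun_jac
+    >>> df_dy, df_dp = estimate_fun_jac(lambda x, y, p: y,
+    ...                                 np.linspace(0, 1, 3),
+    ...                                 np.ones((2, 3)), np.array([]))
+    >>> df_dy.shape, df_dp
+    ((2, 2, 3), None)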
+ """ + n, m = y.shape + if f0 is None: + f0 = fun(x, y, p) + + dtype = y.dtype + + df_dy = np.empty((n, n, m), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(y)) + for i in range(n): + y_new = y.copy() + y_new[i] += h[i] + hi = y_new[i] - y[i] + f_new = fun(x, y_new, p) + df_dy[:, i, :] = (f_new - f0) / hi + + k = p.shape[0] + if k == 0: + df_dp = None + else: + df_dp = np.empty((n, k, m), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(p)) + for i in range(k): + p_new = p.copy() + p_new[i] += h[i] + hi = p_new[i] - p[i] + f_new = fun(x, y, p_new) + df_dp[:, i, :] = (f_new - f0) / hi + + return df_dy, df_dp + + +def estimate_bc_jac(bc, ya, yb, p, bc0=None): + """Estimate derivatives of boundary conditions with forward differences. + + Returns + ------- + dbc_dya : ndarray, shape (n + k, n) + Derivatives with respect to ya. An element (i, j) corresponds to + d bc_i / d ya_j. + dbc_dyb : ndarray, shape (n + k, n) + Derivatives with respect to yb. An element (i, j) corresponds to + d bc_i / d ya_j. + dbc_dp : ndarray with shape (n + k, k) or None + Derivatives with respect to p. An element (i, j) corresponds to + d bc_i / d p_j. If `p` is empty, None is returned. + """ + n = ya.shape[0] + k = p.shape[0] + + if bc0 is None: + bc0 = bc(ya, yb, p) + + dtype = ya.dtype + + dbc_dya = np.empty((n, n + k), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(ya)) + for i in range(n): + ya_new = ya.copy() + ya_new[i] += h[i] + hi = ya_new[i] - ya[i] + bc_new = bc(ya_new, yb, p) + dbc_dya[i] = (bc_new - bc0) / hi + dbc_dya = dbc_dya.T + + h = EPS**0.5 * (1 + np.abs(yb)) + dbc_dyb = np.empty((n, n + k), dtype=dtype) + for i in range(n): + yb_new = yb.copy() + yb_new[i] += h[i] + hi = yb_new[i] - yb[i] + bc_new = bc(ya, yb_new, p) + dbc_dyb[i] = (bc_new - bc0) / hi + dbc_dyb = dbc_dyb.T + + if k == 0: + dbc_dp = None + else: + h = EPS**0.5 * (1 + np.abs(p)) + dbc_dp = np.empty((k, n + k), dtype=dtype) + for i in range(k): + p_new = p.copy() + p_new[i] += h[i] + hi = p_new[i] - p[i] + bc_new = bc(ya, yb, p_new) + dbc_dp[i] = (bc_new - bc0) / hi + dbc_dp = dbc_dp.T + + return dbc_dya, dbc_dyb, dbc_dp + + +def compute_jac_indices(n, m, k): + """Compute indices for the collocation system Jacobian construction. + + See `construct_global_jac` for the explanation. + """ + i_col = np.repeat(np.arange((m - 1) * n), n) + j_col = (np.tile(np.arange(n), n * (m - 1)) + + np.repeat(np.arange(m - 1) * n, n**2)) + + i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n) + j_bc = np.tile(np.arange(n), n + k) + + i_p_col = np.repeat(np.arange((m - 1) * n), k) + j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n) + + i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k) + j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k) + + i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc)) + j = np.hstack((j_col, j_col + n, + j_bc, j_bc + (m - 1) * n, + j_p_col, j_p_bc)) + + return i, j + + +def stacked_matmul(a, b): + """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]). + + Empirical optimization. Use outer Python loop and BLAS for large + matrices, otherwise use a single einsum call. + """ + if a.shape[1] > 50: + out = np.empty((a.shape[0], a.shape[1], b.shape[2])) + for i in range(a.shape[0]): + out[i] = np.dot(a[i], b[i]) + return out + else: + return np.einsum('...ij,...jk->...ik', a, b) + + +def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp, + df_dp_middle, dbc_dya, dbc_dyb, dbc_dp): + """Construct the Jacobian of the collocation system. 
+
+    There are n * m + k functions: m - 1 collocation residuals, each
+    containing n components, followed by n + k boundary condition residuals.
+
+    There are n * m + k variables: m vectors of y, each containing n
+    components, followed by k values of vector p.
+
+    For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
+    the following sparsity structure:
+
+        1 1 2 2 0 0 0 0   5
+        1 1 2 2 0 0 0 0   5
+        0 0 1 1 2 2 0 0   5
+        0 0 1 1 2 2 0 0   5
+        0 0 0 0 1 1 2 2   5
+        0 0 0 0 1 1 2 2   5
+
+        3 3 0 0 0 0 4 4   6
+        3 3 0 0 0 0 4 4   6
+        3 3 0 0 0 0 4 4   6
+
+    Zeros denote identically zero values; other values denote different kinds
+    of blocks in the matrix (see below). The blank row indicates the
+    separation of collocation residuals from boundary condition residuals,
+    and the blank column indicates the separation of y values from p values.
+
+    Refer to [1]_ (p. 306) for the formula of the n x n blocks for the
+    derivatives of collocation residuals with respect to y.
+
+    Parameters
+    ----------
+    n : int
+        Number of equations in the ODE system.
+    m : int
+        Number of nodes in the mesh.
+    k : int
+        Number of unknown parameters.
+    i_jac, j_jac : ndarray
+        Row and column indices returned by `compute_jac_indices`. They
+        represent different blocks in the Jacobian matrix in the following
+        order (see the scheme above):
+
+        * 1: m - 1 diagonal n x n blocks for the collocation residuals.
+        * 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
+        * 3: (n + k) x n block for the dependency of the boundary
+          conditions on ya.
+        * 4: (n + k) x n block for the dependency of the boundary
+          conditions on yb.
+        * 5: (m - 1) * n x k block for the dependency of the collocation
+          residuals on p.
+        * 6: (n + k) x k block for the dependency of the boundary
+          conditions on p.
+
+    df_dy : ndarray, shape (n, n, m)
+        Jacobian of f with respect to y computed at the mesh nodes.
+    df_dy_middle : ndarray, shape (n, n, m - 1)
+        Jacobian of f with respect to y computed at the middle between the
+        mesh nodes.
+    df_dp : ndarray with shape (n, k, m) or None
+        Jacobian of f with respect to p computed at the mesh nodes.
+    df_dp_middle : ndarray with shape (n, k, m - 1) or None
+        Jacobian of f with respect to p computed at the middle between the
+        mesh nodes.
+    dbc_dya, dbc_dyb : ndarray, shape (n + k, n)
+        Jacobian of bc with respect to ya and yb.
+    dbc_dp : ndarray with shape (n + k, k) or None
+        Jacobian of bc with respect to p.
+
+    Returns
+    -------
+    J : csc_matrix, shape (n * m + k, n * m + k)
+        Jacobian of the collocation system in a sparse form.
+
+    References
+    ----------
+    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
+    """
+    df_dy = np.transpose(df_dy, (2, 0, 1))
+    df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
+
+    h = h[:, np.newaxis, np.newaxis]
+
+    dtype = df_dy.dtype
+
+    # Computing diagonal n x n blocks.
+    dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
+    dPhi_dy_0[:] = -np.identity(n)
+    dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
+    T = stacked_matmul(df_dy_middle, df_dy[:-1])
+    dPhi_dy_0 -= h**2 / 12 * T
+
+    # Computing off-diagonal n x n blocks.
+    dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
+    dPhi_dy_1[:] = np.identity(n)
+    dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
+    T = stacked_matmul(df_dy_middle, df_dy[1:])
+    dPhi_dy_1 += h**2 / 12 * T
+
+    values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
+                        dbc_dyb.ravel()))
+
+    if k > 0:
+        df_dp = np.transpose(df_dp, (2, 0, 1))
+        df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
+        T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
+        df_dp_middle += 0.125 * h * T
+        dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
+        values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
+
+    J = coo_matrix((values, (i_jac, j_jac)))
+    return csc_matrix(J)
+
+
+def collocation_fun(fun, y, p, x, h):
+    """Evaluate collocation residuals.
+
+    This function lies at the core of the method. The solution is sought
+    as a cubic C1 continuous spline with derivatives matching the ODE rhs
+    at given nodes `x`. Collocation conditions are formed from the equality
+    of the spline derivatives and the rhs of the ODE system at the middle
+    points between nodes.
+
+    Such a method is classified as belonging to the Lobatto IIIA family in
+    the ODE literature. Refer to [1]_ for the formula and some discussion.
+
+    Returns
+    -------
+    col_res : ndarray, shape (n, m - 1)
+        Collocation residuals at the middle points of the mesh intervals.
+    y_middle : ndarray, shape (n, m - 1)
+        Values of the cubic spline evaluated at the middle points of the mesh
+        intervals.
+    f : ndarray, shape (n, m)
+        RHS of the ODE system evaluated at the mesh nodes.
+    f_middle : ndarray, shape (n, m - 1)
+        RHS of the ODE system evaluated at the middle points of the mesh
+        intervals (and using `y_middle`).
+
+    References
+    ----------
+    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
+    """
+    f = fun(x, y, p)
+    y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
+                0.125 * h * (f[:, 1:] - f[:, :-1]))
+    f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
+    col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
+                                              4 * f_middle)
+
+    return col_res, y_middle, f, f_middle
+
+
+def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
+    """Create the function and the Jacobian for the collocation system."""
+    x_middle = x[:-1] + 0.5 * h
+    i_jac, j_jac = compute_jac_indices(n, m, k)
+
+    def col_fun(y, p):
+        return collocation_fun(fun, y, p, x, h)
+
+    def sys_jac(y, p, y_middle, f, f_middle, bc0):
+        if fun_jac is None:
+            df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
+            df_dy_middle, df_dp_middle = estimate_fun_jac(
+                fun, x_middle, y_middle, p, f_middle)
+        else:
+            df_dy, df_dp = fun_jac(x, y, p)
+            df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
+
+        if bc_jac is None:
+            dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
+                                                       p, bc0)
+        else:
+            dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
+
+        return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
+                                    df_dy_middle, df_dp, df_dp_middle, dbc_dya,
+                                    dbc_dyb, dbc_dp)
+
+    return col_fun, sys_jac
+
+
+def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
+    """Solve the nonlinear collocation system by a Newton method.
+
+    This is a simple Newton method with a backtracking line search. As
+    advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
+    is used, where J is the Jacobian matrix at the current iteration and r is
+    the vector of collocation residuals (values of the system lhs).
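+
+    As a minimal sketch of this criterion (assuming a prefactored Jacobian
+    ``LU`` from ``splu`` and a residual vector ``res``, as in the iteration
+    loop below)::
+
+        step = LU.solve(res)        # Newton step, J^-1 r
+        cost = np.dot(step, step)   # criterion F = ||J^-1 r||^2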
+
+    The method alternates between full Newton iterations and fixed-Jacobian
+    iterations, based on how each step goes: when a full step is accepted,
+    the current Jacobian factorization is reused for the next iteration (see
+    the comments in the iteration loop below).
+
+    There are other tricks proposed in [1]_, but they are not used as they
+    don't seem to improve anything significantly, and even break the
+    convergence on some test problems I tried.
+
+    All important parameters of the algorithm are defined inside the function.
+
+    Parameters
+    ----------
+    n : int
+        Number of equations in the ODE system.
+    m : int
+        Number of nodes in the mesh.
+    h : ndarray, shape (m-1,)
+        Mesh intervals.
+    col_fun : callable
+        Function computing collocation residuals.
+    bc : callable
+        Function computing boundary condition residuals.
+    jac : callable
+        Function computing the Jacobian of the whole system (including
+        collocation and boundary condition residuals). It is expected to
+        return a csc_matrix.
+    y : ndarray, shape (n, m)
+        Initial guess for the function values at the mesh nodes.
+    p : ndarray, shape (k,)
+        Initial guess for the unknown parameters.
+    B : ndarray with shape (n, n) or None
+        Matrix to force the S y(a) = 0 condition for problems with a
+        singular term. If None, the singular term is assumed to be absent.
+    bvp_tol : float
+        Tolerance to which we want to solve a BVP.
+    bc_tol : float
+        Tolerance to which we want to satisfy the boundary conditions.
+
+    Returns
+    -------
+    y : ndarray, shape (n, m)
+        Final iterate for the function values at the mesh nodes.
+    p : ndarray, shape (k,)
+        Final iterate for the unknown parameters.
+    singular : bool
+        True if the LU decomposition failed because the Jacobian turned out
+        to be singular.
+
+    References
+    ----------
+    .. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+       Boundary Value Problems for Ordinary Differential Equations"
+    """
+    # We know that the solution residuals at the middle points of the mesh
+    # are connected with collocation residuals r_middle = 1.5 * col_res / h.
+    # As our BVP solver tries to decrease relative residuals below a certain
+    # tolerance, it seems reasonable to terminate Newton iterations by
+    # comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
+    # which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
+    # the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
+    # should be computed as follows:
+    tol_r = 2/3 * h * 5e-2 * bvp_tol
+
+    # Maximum allowed number of Jacobian evaluations and factorizations, in
+    # other words, the maximum number of full Newton iterations. A small value
+    # is recommended in the literature.
+    max_njev = 4
+
+    # Maximum number of iterations, considering that some of them can be
+    # performed with the fixed Jacobian. In theory, such iterations are cheap,
+    # but it's not that simple in Python.
+    max_iter = 8
+
+    # Minimum relative improvement of the criterion function to accept the
+    # step (Armijo constant).
+    sigma = 0.2
+
+    # Step size decrease factor for backtracking.
+    tau = 0.5
+
+    # Maximum number of backtracking steps, the minimum step is then
+    # tau ** n_trial.
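+    # A sketch of the backtracking rule used below: with `cost` and `cost_new`
+    # the criterion values before and after a trial step, a step of size
+    # `alpha` is accepted once
+    #     cost_new < (1 - 2 * alpha * sigma) * cost,
+    # where alpha runs over 1, tau, tau**2, ..., tau**n_trial.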
+    n_trial = 4
+
+    col_res, y_middle, f, f_middle = col_fun(y, p)
+    bc_res = bc(y[:, 0], y[:, -1], p)
+    res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+    njev = 0
+    singular = False
+    recompute_jac = True
+    for iteration in range(max_iter):
+        if recompute_jac:
+            J = jac(y, p, y_middle, f, f_middle, bc_res)
+            njev += 1
+            try:
+                LU = splu(J)
+            except RuntimeError:
+                singular = True
+                break
+
+            step = LU.solve(res)
+            cost = np.dot(step, step)
+
+        y_step = step[:m * n].reshape((n, m), order='F')
+        p_step = step[m * n:]
+
+        alpha = 1
+        for trial in range(n_trial + 1):
+            y_new = y - alpha * y_step
+            if B is not None:
+                y_new[:, 0] = np.dot(B, y_new[:, 0])
+            p_new = p - alpha * p_step
+
+            col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
+            bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
+            res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+            step_new = LU.solve(res)
+            cost_new = np.dot(step_new, step_new)
+            if cost_new < (1 - 2 * alpha * sigma) * cost:
+                break
+
+            if trial < n_trial:
+                alpha *= tau
+
+        y = y_new
+        p = p_new
+
+        if njev == max_njev:
+            break
+
+        if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
+                np.all(np.abs(bc_res) < bc_tol)):
+            break
+
+        # If the full step was taken, then we are going to continue with
+        # the same Jacobian. This is the approach of BVP_SOLVER.
+        if alpha == 1:
+            step = step_new
+            cost = cost_new
+            recompute_jac = False
+        else:
+            recompute_jac = True
+
+    return y, p, singular
+
+
+def print_iteration_header():
+    print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
+        "Iteration", "Max residual", "Max BC residual", "Total nodes",
+        "Nodes added"))
+
+
+def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
+                             nodes_added):
+    print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
+        iteration, residual, bc_residual, total_nodes, nodes_added))
+
+
+class BVPResult(OptimizeResult):
+    pass
+
+
+TERMINATION_MESSAGES = {
+    0: "The algorithm converged to the desired accuracy.",
+    1: "The maximum number of mesh nodes is exceeded.",
+    2: "A singular Jacobian encountered when solving the collocation system.",
+    3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
+}
+
+
+def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
+    """Estimate RMS values of collocation residuals using Lobatto quadrature.
+
+    The residuals are defined as the difference between the derivatives of
+    our solution and the rhs of the ODE system. We use relative residuals,
+    i.e., normalized by 1 + np.abs(f). RMS values are computed as the square
+    root of the normalized integrals of the squared relative residuals over
+    each interval. Integrals are estimated using 5-point Lobatto quadrature
+    [1]_, making use of the fact that residuals at the mesh nodes are
+    identically zero.
+
+    In [2]_ the integrals are not normalized by interval lengths, which gives
+    a higher rate of convergence of the residuals by a factor of h**0.5. The
+    normalization is done here for ease of interpreting the return values as
+    RMS estimates.
+
+    Returns
+    -------
+    rms_res : ndarray, shape (m - 1,)
+        Estimated RMS values of the relative residuals over each interval.
+
+    References
+    ----------
+    .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
+    .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
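+
+    As a minimal sketch of the estimate (the 5-point Lobatto weights that
+    appear in the return statement below; ``r1``, ``r2`` and ``r_middle`` are
+    the squared relative residuals at the two interior quadrature points and
+    the interval midpoints)::
+
+        rms_res = (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5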
+ """ + x_middle = x[:-1] + 0.5 * h + s = 0.5 * h * (3/7)**0.5 + x1 = x_middle + s + x2 = x_middle - s + y1 = sol(x1) + y2 = sol(x2) + y1_prime = sol(x1, 1) + y2_prime = sol(x2, 1) + f1 = fun(x1, y1, p) + f2 = fun(x2, y2, p) + r1 = y1_prime - f1 + r2 = y2_prime - f2 + + r_middle /= 1 + np.abs(f_middle) + r1 /= 1 + np.abs(f1) + r2 /= 1 + np.abs(f2) + + r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0) + r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0) + r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0) + + return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5 + + +def create_spline(y, yp, x, h): + """Create a cubic spline given values and derivatives. + + Formulas for the coefficients are taken from interpolate.CubicSpline. + + Returns + ------- + sol : PPoly + Constructed spline as a PPoly instance. + """ + from scipy.interpolate import PPoly + + n, m = y.shape + c = np.empty((4, n, m - 1), dtype=y.dtype) + slope = (y[:, 1:] - y[:, :-1]) / h + t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h + c[0] = t / h + c[1] = (slope - yp[:, :-1]) / h - t + c[2] = yp[:, :-1] + c[3] = y[:, :-1] + c = np.moveaxis(c, 1, 0) + + return PPoly(c, x, extrapolate=True, axis=1) + + +def modify_mesh(x, insert_1, insert_2): + """Insert nodes into a mesh. + + Nodes removal logic is not established, its impact on the solver is + presumably negligible. So, only insertion is done in this function. + + Parameters + ---------- + x : ndarray, shape (m,) + Mesh nodes. + insert_1 : ndarray + Intervals to each insert 1 new node in the middle. + insert_2 : ndarray + Intervals to each insert 2 new nodes, such that divide an interval + into 3 equal parts. + + Returns + ------- + x_new : ndarray + New mesh nodes. + + Notes + ----- + `insert_1` and `insert_2` should not have common values. + """ + # Because np.insert implementation apparently varies with a version of + # NumPy, we use a simple and reliable approach with sorting. 
+ return np.sort(np.hstack(( + x, + 0.5 * (x[insert_1] + x[insert_1 + 1]), + (2 * x[insert_2] + x[insert_2 + 1]) / 3, + (x[insert_2] + 2 * x[insert_2 + 1]) / 3 + ))) + + +def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype): + """Wrap functions for unified usage in the solver.""" + if fun_jac is None: + fun_jac_wrapped = None + + if bc_jac is None: + bc_jac_wrapped = None + + if k == 0: + def fun_p(x, y, _): + return np.asarray(fun(x, y), dtype) + + def bc_wrapped(ya, yb, _): + return np.asarray(bc(ya, yb), dtype) + + if fun_jac is not None: + def fun_jac_p(x, y, _): + return np.asarray(fun_jac(x, y), dtype), None + + if bc_jac is not None: + def bc_jac_wrapped(ya, yb, _): + dbc_dya, dbc_dyb = bc_jac(ya, yb) + return (np.asarray(dbc_dya, dtype), + np.asarray(dbc_dyb, dtype), None) + else: + def fun_p(x, y, p): + return np.asarray(fun(x, y, p), dtype) + + def bc_wrapped(x, y, p): + return np.asarray(bc(x, y, p), dtype) + + if fun_jac is not None: + def fun_jac_p(x, y, p): + df_dy, df_dp = fun_jac(x, y, p) + return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype) + + if bc_jac is not None: + def bc_jac_wrapped(ya, yb, p): + dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p) + return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype), + np.asarray(dbc_dp, dtype)) + + if S is None: + fun_wrapped = fun_p + else: + def fun_wrapped(x, y, p): + f = fun_p(x, y, p) + if x[0] == a: + f[:, 0] = np.dot(D, f[:, 0]) + f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a) + else: + f += np.dot(S, y) / (x - a) + return f + + if fun_jac is not None: + if S is None: + fun_jac_wrapped = fun_jac_p + else: + Sr = S[:, :, np.newaxis] + + def fun_jac_wrapped(x, y, p): + df_dy, df_dp = fun_jac_p(x, y, p) + if x[0] == a: + df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0]) + df_dy[:, :, 1:] += Sr / (x[1:] - a) + else: + df_dy += Sr / (x - a) + + return df_dy, df_dp + + return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped + + +def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None, + tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None): + """Solve a boundary value problem for a system of ODEs. + + This function numerically solves a first order system of ODEs subject to + two-point boundary conditions:: + + dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b + bc(y(a), y(b), p) = 0 + + Here x is a 1-D independent variable, y(x) is an N-D + vector-valued function and p is a k-D vector of unknown + parameters which is to be found along with y(x). For the problem to be + determined, there must be n + k boundary conditions, i.e., bc must be an + (n + k)-D function. + + The last singular term on the right-hand side of the system is optional. + It is defined by an n-by-n matrix S, such that the solution must satisfy + S y(a) = 0. This condition will be forced during iterations, so it must not + contradict boundary conditions. See [2]_ for the explanation how this term + is handled when solving BVPs numerically. + + Problems in a complex domain can be solved as well. In this case, y and p + are considered to be complex, and f and bc are assumed to be complex-valued + functions, but x stays real. Note that f and bc must be complex + differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you + should rewrite your problem for real and imaginary parts separately. To + solve a problem in a complex domain, pass an initial guess for y with a + complex data type (see below). + + Parameters + ---------- + fun : callable + Right-hand side of the system. 
The calling signature is ``fun(x, y)``,
+        or ``fun(x, y, p)`` if parameters are present. All arguments are
+        ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
+        ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
+        return value must be an array with shape (n, m) and with the same
+        layout as ``y``.
+    bc : callable
+        Function evaluating residuals of the boundary conditions. The calling
+        signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
+        present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
+        and ``p`` with shape (k,). The return value must be an array with
+        shape (n + k,).
+    x : array_like, shape (m,)
+        Initial mesh. Must be a strictly increasing sequence of real numbers
+        with ``x[0]=a`` and ``x[-1]=b``.
+    y : array_like, shape (n, m)
+        Initial guess for the function values at the mesh nodes; the ith
+        column corresponds to ``x[i]``. For problems in a complex domain pass
+        `y` with a complex data type (even if the initial guess is purely
+        real).
+    p : array_like with shape (k,) or None, optional
+        Initial guess for the unknown parameters. If None (default), it is
+        assumed that the problem doesn't depend on any parameters.
+    S : array_like with shape (n, n) or None
+        Matrix defining the singular term. If None (default), the problem is
+        solved without the singular term.
+    fun_jac : callable or None, optional
+        Function computing derivatives of f with respect to y and p. The
+        calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
+        parameters are present. The return must contain 1 or 2 elements in the
+        following order:
+
+        * df_dy : array_like with shape (n, n, m), where an element
+          (i, j, q) equals d f_i(x_q, y_q, p) / d (y_q)_j.
+        * df_dp : array_like with shape (n, k, m), where an element
+          (i, j, q) equals d f_i(x_q, y_q, p) / d p_j.
+
+        Here q numbers nodes at which x and y are defined, whereas i and j
+        number vector components. If the problem is solved without unknown
+        parameters, df_dp should not be returned.
+
+        If `fun_jac` is None (default), the derivatives will be estimated
+        by forward finite differences.
+    bc_jac : callable or None, optional
+        Function computing derivatives of bc with respect to ya, yb, and p.
+        The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
+        if parameters are present. The return must contain 2 or 3 elements in
+        the following order:
+
+        * dbc_dya : array_like with shape (n, n), where an element (i, j)
+          equals d bc_i(ya, yb, p) / d ya_j.
+        * dbc_dyb : array_like with shape (n, n), where an element (i, j)
+          equals d bc_i(ya, yb, p) / d yb_j.
+        * dbc_dp : array_like with shape (n, k), where an element (i, j)
+          equals d bc_i(ya, yb, p) / d p_j.
+
+        If the problem is solved without unknown parameters, dbc_dp should not
+        be returned.
+
+        If `bc_jac` is None (default), the derivatives will be estimated by
+        forward finite differences.
+    tol : float, optional
+        Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
+        where y is the found solution, then the solver tries to achieve on each
+        mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
+        estimated in a root mean squared sense (using a numerical quadrature
+        formula). Default is 1e-3.
+    max_nodes : int, optional
+        Maximum allowed number of the mesh nodes. If exceeded, the algorithm
+        terminates. Default is 1000.
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+        * 0 (default) : work silently.
+        * 1 : display a termination report.
+        * 2 : display progress during iterations.
+    bc_tol : float, optional
+        Desired absolute tolerance for the boundary condition residuals: `bc`
+        value should satisfy ``abs(bc) < bc_tol`` component-wise.
+        Equal to `tol` by default. Up to 10 iterations are allowed to achieve
+        this tolerance.
+
+    Returns
+    -------
+    Bunch object with the following fields defined:
+    sol : PPoly
+        Found solution for y as `scipy.interpolate.PPoly` instance, a C1
+        continuous cubic spline.
+    p : ndarray or None, shape (k,)
+        Found parameters. None, if the parameters were not present in the
+        problem.
+    x : ndarray, shape (m,)
+        Nodes of the final mesh.
+    y : ndarray, shape (n, m)
+        Solution values at the mesh nodes.
+    yp : ndarray, shape (n, m)
+        Solution derivatives at the mesh nodes.
+    rms_residuals : ndarray, shape (m - 1,)
+        RMS values of the relative residuals over each mesh interval (see the
+        description of `tol` parameter).
+    niter : int
+        Number of completed iterations.
+    status : int
+        Reason for algorithm termination:
+
+        * 0: The algorithm converged to the desired accuracy.
+        * 1: The maximum number of mesh nodes is exceeded.
+        * 2: A singular Jacobian encountered when solving the collocation
+          system.
+        * 3: The solver was unable to satisfy the boundary condition
+          tolerance within 10 iterations.
+
+    message : string
+        Verbal description of the termination reason.
+    success : bool
+        True if the algorithm converged to the desired accuracy (``status=0``).
+
+    Notes
+    -----
+    This function implements a 4th order collocation algorithm with the
+    control of residuals similar to [1]_. A collocation system is solved
+    by a damped Newton method with an affine-invariant criterion function as
+    described in [3]_.
+
+    Note that in [1]_ integral residuals are defined without normalization
+    by interval lengths. So their definition differs by a factor of h**0.5
+    (h is the interval length) from the definition used here.
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
+    .. [2] L. F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
+       Solver".
+    .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+       Boundary Value Problems for Ordinary Differential Equations".
+    .. [4] `Cauchy-Riemann equations
+       <https://en.wikipedia.org/wiki/Cauchy%E2%80%93Riemann_equations>`_ on
+       Wikipedia.
+
+    Examples
+    --------
+    In the first example, we solve Bratu's problem::
+
+        y'' + k * exp(y) = 0
+        y(0) = y(1) = 0
+
+    for k = 1.
+
+    We rewrite the equation as a first-order system and implement its
+    right-hand side evaluation::
+
+        y1' = y2
+        y2' = -exp(y1)
+
+    >>> import numpy as np
+    >>> def fun(x, y):
+    ...     return np.vstack((y[1], -np.exp(y[0])))
+
+    Implement evaluation of the boundary condition residuals:
+
+    >>> def bc(ya, yb):
+    ...     return np.array([ya[0], yb[0]])
+
+    Define the initial mesh with 5 nodes:
+
+    >>> x = np.linspace(0, 1, 5)
+
+    This problem is known to have two solutions. To obtain both of them, we
+    use two different initial guesses for y. We denote them by subscripts
+    a and b.
+
+    >>> y_a = np.zeros((2, x.size))
+    >>> y_b = np.zeros((2, x.size))
+    >>> y_b[0] = 3
+
+    Now we are ready to run the solver.
+
+    >>> from scipy.integrate import solve_bvp
+    >>> res_a = solve_bvp(fun, bc, x, y_a)
+    >>> res_b = solve_bvp(fun, bc, x, y_b)
+
+    Let's plot the two found solutions. We take advantage of having the
+    solution in spline form to produce a smooth plot.
+ + >>> x_plot = np.linspace(0, 1, 100) + >>> y_plot_a = res_a.sol(x_plot)[0] + >>> y_plot_b = res_b.sol(x_plot)[0] + >>> import matplotlib.pyplot as plt + >>> plt.plot(x_plot, y_plot_a, label='y_a') + >>> plt.plot(x_plot, y_plot_b, label='y_b') + >>> plt.legend() + >>> plt.xlabel("x") + >>> plt.ylabel("y") + >>> plt.show() + + We see that the two solutions have similar shape, but differ in scale + significantly. + + In the second example, we solve a simple Sturm-Liouville problem:: + + y'' + k**2 * y = 0 + y(0) = y(1) = 0 + + It is known that a non-trivial solution y = A * sin(k * x) is possible for + k = pi * n, where n is an integer. To establish the normalization constant + A = 1 we add a boundary condition:: + + y'(0) = k + + Again, we rewrite our equation as a first-order system and implement its + right-hand side evaluation:: + + y1' = y2 + y2' = -k**2 * y1 + + >>> def fun(x, y, p): + ... k = p[0] + ... return np.vstack((y[1], -k**2 * y[0])) + + Note that parameters p are passed as a vector (with one element in our + case). + + Implement the boundary conditions: + + >>> def bc(ya, yb, p): + ... k = p[0] + ... return np.array([ya[0], yb[0], ya[1] - k]) + + Set up the initial mesh and guess for y. We aim to find the solution for + k = 2 * pi, to achieve that we set values of y to approximately follow + sin(2 * pi * x): + + >>> x = np.linspace(0, 1, 5) + >>> y = np.zeros((2, x.size)) + >>> y[0, 1] = 1 + >>> y[0, 3] = -1 + + Run the solver with 6 as an initial guess for k. + + >>> sol = solve_bvp(fun, bc, x, y, p=[6]) + + We see that the found k is approximately correct: + + >>> sol.p[0] + 6.28329460046 + + And, finally, plot the solution to see the anticipated sinusoid: + + >>> x_plot = np.linspace(0, 1, 100) + >>> y_plot = sol.sol(x_plot)[0] + >>> plt.plot(x_plot, y_plot) + >>> plt.xlabel("x") + >>> plt.ylabel("y") + >>> plt.show() + """ + x = np.asarray(x, dtype=float) + if x.ndim != 1: + raise ValueError("`x` must be 1 dimensional.") + h = np.diff(x) + if np.any(h <= 0): + raise ValueError("`x` must be strictly increasing.") + a = x[0] + + y = np.asarray(y) + if np.issubdtype(y.dtype, np.complexfloating): + dtype = complex + else: + dtype = float + y = y.astype(dtype, copy=False) + + if y.ndim != 2: + raise ValueError("`y` must be 2 dimensional.") + if y.shape[1] != x.shape[0]: + raise ValueError(f"`y` is expected to have {x.shape[0]} columns, but actually " + f"has {y.shape[1]}.") + + if p is None: + p = np.array([]) + else: + p = np.asarray(p, dtype=dtype) + if p.ndim != 1: + raise ValueError("`p` must be 1 dimensional.") + + if tol < 100 * EPS: + warn(f"`tol` is too low, setting to {100 * EPS:.2e}", stacklevel=2) + tol = 100 * EPS + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + n = y.shape[0] + k = p.shape[0] + + if S is not None: + S = np.asarray(S, dtype=dtype) + if S.shape != (n, n): + raise ValueError(f"`S` is expected to have shape {(n, n)}, " + f"but actually has {S.shape}") + + # Compute I - S^+ S to impose necessary boundary conditions. + B = np.identity(n) - np.dot(pinv(S), S) + + y[:, 0] = np.dot(B, y[:, 0]) + + # Compute (I - S)^+ to correct derivatives at x=a. 
+ D = pinv(np.identity(n) - S) + else: + B = None + D = None + + if bc_tol is None: + bc_tol = tol + + # Maximum number of iterations + max_iteration = 10 + + fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions( + fun, bc, fun_jac, bc_jac, k, a, S, D, dtype) + + f = fun_wrapped(x, y, p) + if f.shape != y.shape: + raise ValueError(f"`fun` return is expected to have shape {y.shape}, " + f"but actually has {f.shape}.") + + bc_res = bc_wrapped(y[:, 0], y[:, -1], p) + if bc_res.shape != (n + k,): + raise ValueError(f"`bc` return is expected to have shape {(n + k,)}, " + f"but actually has {bc_res.shape}.") + + status = 0 + iteration = 0 + if verbose == 2: + print_iteration_header() + + while True: + m = x.shape[0] + + col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped, + fun_jac_wrapped, bc_jac_wrapped, x, h) + y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys, + y, p, B, tol, bc_tol) + iteration += 1 + + col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y, + p, x, h) + bc_res = bc_wrapped(y[:, 0], y[:, -1], p) + max_bc_res = np.max(abs(bc_res)) + + # This relation is not trivial, but can be verified. + r_middle = 1.5 * col_res / h + sol = create_spline(y, f, x, h) + rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p, + r_middle, f_middle) + max_rms_res = np.max(rms_res) + + if singular: + status = 2 + break + + insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol)) + insert_2, = np.nonzero(rms_res >= 100 * tol) + nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0] + + if m + nodes_added > max_nodes: + status = 1 + if verbose == 2: + nodes_added = f"({nodes_added})" + print_iteration_progress(iteration, max_rms_res, max_bc_res, + m, nodes_added) + break + + if verbose == 2: + print_iteration_progress(iteration, max_rms_res, max_bc_res, m, + nodes_added) + + if nodes_added > 0: + x = modify_mesh(x, insert_1, insert_2) + h = np.diff(x) + y = sol(x) + elif max_bc_res <= bc_tol: + status = 0 + break + elif iteration >= max_iteration: + status = 3 + break + + if verbose > 0: + if status == 0: + print(f"Solved in {iteration} iterations, number of nodes {x.shape[0]}. \n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + elif status == 1: + print(f"Number of nodes is exceeded after iteration {iteration}. \n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + elif status == 2: + print("Singular Jacobian encountered when solving the collocation " + f"system on iteration {iteration}. \n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + elif status == 3: + print("The solver was unable to satisfy boundary conditions " + f"tolerance on iteration {iteration}. 
\n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + + if p.size == 0: + p = None + + return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res, + niter=iteration, status=status, + message=TERMINATION_MESSAGES[status], success=status == 0) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c8aaa36588651ae5e48b58fbb1d443bc71fc77 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py @@ -0,0 +1,8 @@ +"""Suite of ODE solvers implemented in Python.""" +from .ivp import solve_ivp +from .rk import RK23, RK45, DOP853 +from .radau import Radau +from .bdf import BDF +from .lsoda import LSODA +from .common import OdeSolution +from .base import DenseOutput, OdeSolver diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0d832a21a3cc95e919a14137097d00f6e84ac0e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a716ae08c6d1d98ce9b92a8248c579412234e9a Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d02c664bd763cbd3069980795ce2ef0fe71ae1df Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d68ee34106b01c3af79c048fbec74955d8e7ce3 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed25b715c92370619c2b92c976b36e8a668e6f1d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b92f926d5b4c5e8e54d15ceda0f575404649533e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7eaa91fd7da81e065612bd56c6151b65cab5454 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2583787b20aaacca994b01b47263244cb0f1c4be Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a872a5da1626702f964b518dc8f215a18c4ac867 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py new file mode 100644 index 0000000000000000000000000000000000000000..46db9a69dfb3e7aee5c150ac6795234cd455dfe5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py @@ -0,0 +1,290 @@ +import numpy as np + + +def check_arguments(fun, y0, support_complex): + """Helper function for checking arguments common to all solvers.""" + y0 = np.asarray(y0) + if np.issubdtype(y0.dtype, np.complexfloating): + if not support_complex: + raise ValueError("`y0` is complex, but the chosen solver does " + "not support integration in a complex domain.") + dtype = complex + else: + dtype = float + y0 = 
y0.astype(dtype, copy=False) + + if y0.ndim != 1: + raise ValueError("`y0` must be 1-dimensional.") + + if not np.isfinite(y0).all(): + raise ValueError("All components of the initial state `y0` must be finite.") + + def fun_wrapped(t, y): + return np.asarray(fun(t, y), dtype=dtype) + + return fun_wrapped, y0 + + +class OdeSolver: + """Base class for ODE solvers. + + In order to implement a new solver you need to follow the guidelines: + + 1. A constructor must accept parameters presented in the base class + (listed below) along with any other parameters specific to a solver. + 2. A constructor must accept arbitrary extraneous arguments + ``**extraneous``, but warn that these arguments are irrelevant + using `common.warn_extraneous` function. Do not pass these + arguments to the base class. + 3. A solver must implement a private method `_step_impl(self)` which + propagates a solver one step further. It must return tuple + ``(success, message)``, where ``success`` is a boolean indicating + whether a step was successful, and ``message`` is a string + containing description of a failure if a step failed or None + otherwise. + 4. A solver must implement a private method `_dense_output_impl(self)`, + which returns a `DenseOutput` object covering the last successful + step. + 5. A solver must have attributes listed below in Attributes section. + Note that ``t_old`` and ``step_size`` are updated automatically. + 6. Use `fun(self, t, y)` method for the system rhs evaluation, this + way the number of function evaluations (`nfev`) will be tracked + automatically. + 7. For convenience, a base class provides `fun_single(self, t, y)` and + `fun_vectorized(self, t, y)` for evaluating the rhs in + non-vectorized and vectorized fashions respectively (regardless of + how `fun` from the constructor is implemented). These calls don't + increment `nfev`. + 8. If a solver uses a Jacobian matrix and LU decompositions, it should + track the number of Jacobian evaluations (`njev`) and the number of + LU decompositions (`nlu`). + 9. By convention, the function evaluations used to compute a finite + difference approximation of the Jacobian should not be counted in + `nfev`, thus use `fun_single(self, t, y)` or + `fun_vectorized(self, t, y)` when computing a finite difference + approximation of the Jacobian. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time --- the integration won't continue beyond it. It also + determines the direction of the integration. + vectorized : bool + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). 
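+
+        As a minimal sketch (an editor's example): a linear system is
+        naturally vectorized, since a matrix product handles ``y`` of shape
+        (n,) and (n, k) alike::
+
+            A = np.array([[0.0, 1.0], [-1.0, 0.0]])
+
+            def fun(t, y):
+                return A @ y   # acts column-wise on stacked states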
+ + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for other methods. It can also + result in slower overall execution for 'Radau' and 'BDF' in some + circumstances (e.g. small ``len(y0)``). + support_complex : bool, optional + Whether integration in a complex domain should be supported. + Generally determined by a derived solver class capabilities. + Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of the system's rhs evaluations. + njev : int + Number of the Jacobian evaluations. + nlu : int + Number of LU decompositions. + """ + TOO_SMALL_STEP = "Required step size is less than spacing between numbers." + + def __init__(self, fun, t0, y0, t_bound, vectorized, + support_complex=False): + self.t_old = None + self.t = t0 + self._fun, self.y = check_arguments(fun, y0, support_complex) + self.t_bound = t_bound + self.vectorized = vectorized + + if vectorized: + def fun_single(t, y): + return self._fun(t, y[:, None]).ravel() + fun_vectorized = self._fun + else: + fun_single = self._fun + + def fun_vectorized(t, y): + f = np.empty_like(y) + for i, yi in enumerate(y.T): + f[:, i] = self._fun(t, yi) + return f + + def fun(t, y): + self.nfev += 1 + return self.fun_single(t, y) + + self.fun = fun + self.fun_single = fun_single + self.fun_vectorized = fun_vectorized + + self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1 + self.n = self.y.size + self.status = 'running' + + self.nfev = 0 + self.njev = 0 + self.nlu = 0 + + @property + def step_size(self): + if self.t_old is None: + return None + else: + return np.abs(self.t - self.t_old) + + def step(self): + """Perform one integration step. + + Returns + ------- + message : string or None + Report from the solver. Typically a reason for a failure if + `self.status` is 'failed' after the step was taken or None + otherwise. + """ + if self.status != 'running': + raise RuntimeError("Attempt to step on a failed or finished " + "solver.") + + if self.n == 0 or self.t == self.t_bound: + # Handle corner cases of empty solver or no integration. + self.t_old = self.t + self.t = self.t_bound + message = None + self.status = 'finished' + else: + t = self.t + success, message = self._step_impl() + + if not success: + self.status = 'failed' + else: + self.t_old = t + if self.direction * (self.t - self.t_bound) >= 0: + self.status = 'finished' + + return message + + def dense_output(self): + """Compute a local interpolant over the last successful step. + + Returns + ------- + sol : `DenseOutput` + Local interpolant over the last successful step. + """ + if self.t_old is None: + raise RuntimeError("Dense output is available after a successful " + "step was made.") + + if self.n == 0 or self.t == self.t_old: + # Handle corner cases of empty solver and no integration. 
+ return ConstantDenseOutput(self.t_old, self.t, self.y) + else: + return self._dense_output_impl() + + def _step_impl(self): + raise NotImplementedError + + def _dense_output_impl(self): + raise NotImplementedError + + +class DenseOutput: + """Base class for local interpolant over step made by an ODE solver. + + It interpolates between `t_min` and `t_max` (see Attributes below). + Evaluation outside this interval is not forbidden, but the accuracy is not + guaranteed. + + Attributes + ---------- + t_min, t_max : float + Time range of the interpolation. + """ + def __init__(self, t_old, t): + self.t_old = t_old + self.t = t + self.t_min = min(t, t_old) + self.t_max = max(t, t_old) + + def __call__(self, t): + """Evaluate the interpolant. + + Parameters + ---------- + t : float or array_like with shape (n_points,) + Points to evaluate the solution at. + + Returns + ------- + y : ndarray, shape (n,) or (n, n_points) + Computed values. Shape depends on whether `t` was a scalar or a + 1-D array. + """ + t = np.asarray(t) + if t.ndim > 1: + raise ValueError("`t` must be a float or a 1-D array.") + return self._call_impl(t) + + def _call_impl(self, t): + raise NotImplementedError + + +class ConstantDenseOutput(DenseOutput): + """Constant value interpolator. + + This class used for degenerate integration cases: equal integration limits + or a system with 0 equations. + """ + def __init__(self, t_old, t, value): + super().__init__(t_old, t) + self.value = value + + def _call_impl(self, t): + if t.ndim == 0: + return self.value + else: + ret = np.empty((self.value.shape[0], t.shape[0])) + ret[:] = self.value[:, None] + return ret diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py new file mode 100644 index 0000000000000000000000000000000000000000..29bd9461519255c0bdab6e2172a9e549cb709cbd --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py @@ -0,0 +1,480 @@ +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import issparse, csc_matrix, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, EPS, num_jac, validate_first_step, + warn_extraneous) +from .base import OdeSolver, DenseOutput + + +MAX_ORDER = 5 +NEWTON_MAXITER = 4 +MIN_FACTOR = 0.2 +MAX_FACTOR = 10 + + +def compute_R(order, factor): + """Compute the matrix for changing the differences array.""" + I = np.arange(1, order + 1)[:, None] + J = np.arange(1, order + 1) + M = np.zeros((order + 1, order + 1)) + M[1:, 1:] = (I - 1 - factor * J) / I + M[0] = 1 + return np.cumprod(M, axis=0) + + +def change_D(D, order, factor): + """Change differences array in-place when step size is changed.""" + R = compute_R(order, factor) + U = compute_R(order, 1) + RU = R.dot(U) + D[:order + 1] = np.dot(RU.T, D[:order + 1]) + + +def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol): + """Solve the algebraic system resulting from BDF method.""" + d = 0 + y = y_predict.copy() + dy_norm_old = None + converged = False + for k in range(NEWTON_MAXITER): + f = fun(t_new, y) + if not np.all(np.isfinite(f)): + break + + dy = solve_lu(LU, c * f - psi - d) + dy_norm = norm(dy / scale) + + if dy_norm_old is None: + rate = None + else: + rate = dy_norm / 
dy_norm_old + + if (rate is not None and (rate >= 1 or + rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)): + break + + y += dy + d += dy + + if (dy_norm == 0 or + rate is not None and rate / (1 - rate) * dy_norm < tol): + converged = True + break + + dy_norm_old = dy_norm + + return converged, k + 1, y, d + + +class BDF(OdeSolver): + """Implicit method based on backward-differentiation formulas. + + This is a variable order method with the order varying automatically from + 1 to 5. The general framework of the BDF algorithm is described in [1]_. + This class implements a quasi-constant step size as explained in [2]_. + The error estimation strategy for the constant-step BDF is derived in [3]_. + An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to y, + required by this method. The Jacobian matrix has shape (n, n) and its + element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. 
Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [4]_. A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by this method, but may result in slower + execution overall in some circumstances (e.g. small ``len(y0)``). + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical + Solution of Ordinary Differential Equations", ACM Transactions on + Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975. + .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. + COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. + .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I: + Nonstiff Problems", Sec. III.2. + .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. 
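+
+    A minimal usage sketch (an editor's illustration; `BDF` is normally
+    selected through `scipy.integrate.solve_ivp`)::
+
+        import numpy as np
+        from scipy.integrate import solve_ivp
+
+        # A mildly stiff scalar problem, y' = -50 * (y - cos(t)).
+        sol = solve_ivp(lambda t, y: -50 * (y - np.cos(t)), (0, 10), [0.0],
+                        method='BDF', rtol=1e-6, atol=1e-9)
+        print(sol.status, sol.y.shape)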
+ """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step(self.fun, self.t, self.y, + t_bound, max_step, f, + self.direction, 1, + self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc', dtype=self.y.dtype) + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n, dtype=self.y.dtype) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0]) + self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1)))) + self.alpha = (1 - kappa) * self.gamma + self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2) + + D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype) + D[0] = self.y + D[1] = f * self.h_abs * self.direction + self.D = D + + self.order = 1 + self.n_equal_steps = 0 + self.LU = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y): + self.njev += 1 + f = self.fun_single(t, y) + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0) + elif callable(jac): + J = jac(t0, y0) + self.njev += 1 + if issparse(J): + J = csc_matrix(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=y0.dtype) + else: + J = np.asarray(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return np.asarray(jac(t, y), dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." + .format((self.n, self.n), J.shape)) + else: + if issparse(jac): + J = csc_matrix(jac, dtype=y0.dtype) + else: + J = np.asarray(jac, dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." 
+ .format((self.n, self.n), J.shape)) + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + D = self.D + + max_step = self.max_step + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + change_D(D, self.order, max_step / self.h_abs) + self.n_equal_steps = 0 + elif self.h_abs < min_step: + h_abs = min_step + change_D(D, self.order, min_step / self.h_abs) + self.n_equal_steps = 0 + else: + h_abs = self.h_abs + + atol = self.atol + rtol = self.rtol + order = self.order + + alpha = self.alpha + gamma = self.gamma + error_const = self.error_const + + J = self.J + LU = self.LU + current_jac = self.jac is None + + step_accepted = False + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + change_D(D, order, np.abs(t_new - t) / h_abs) + self.n_equal_steps = 0 + LU = None + + h = t_new - t + h_abs = np.abs(h) + + y_predict = np.sum(D[:order + 1], axis=0) + + scale = atol + rtol * np.abs(y_predict) + psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order] + + converged = False + c = h / alpha[order] + while not converged: + if LU is None: + LU = self.lu(self.I - c * J) + + converged, n_iter, y_new, d = solve_bdf_system( + self.fun, t_new, y_predict, c, psi, LU, self.solve_lu, + scale, self.newton_tol) + + if not converged: + if current_jac: + break + J = self.jac(t_new, y_predict) + LU = None + current_jac = True + + if not converged: + factor = 0.5 + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + LU = None + continue + + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + scale = atol + rtol * np.abs(y_new) + error = error_const[order] * d + error_norm = norm(error / scale) + + if error_norm > 1: + factor = max(MIN_FACTOR, + safety * error_norm ** (-1 / (order + 1))) + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + # As we didn't have problems with convergence, we don't + # reset LU here. + else: + step_accepted = True + + self.n_equal_steps += 1 + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.J = J + self.LU = LU + + # Update differences. The principal relation here is + # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D + # contained difference for previous interpolating polynomial and + # d = D^{k + 1} y_n. Thus this elegant code follows. 
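+        # Spelled out (a descriptive restatement only, no extra logic):
+        # after an accepted step, D[order + 1] is replaced by the Newton
+        # correction d, D[order + 2] stores the increment d - D_old[order + 1],
+        # and the backward sweep
+        #     D[i] += D[i + 1]   for i = order, ..., 0
+        # shifts every backward difference of the interpolating polynomial
+        # from the previous point to t_new.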
+ D[order + 2] = d - D[order + 1] + D[order + 1] = d + for i in reversed(range(order + 1)): + D[i] += D[i + 1] + + if self.n_equal_steps < order + 1: + return True, None + + if order > 1: + error_m = error_const[order - 1] * D[order] + error_m_norm = norm(error_m / scale) + else: + error_m_norm = np.inf + + if order < MAX_ORDER: + error_p = error_const[order + 1] * D[order + 2] + error_p_norm = norm(error_p / scale) + else: + error_p_norm = np.inf + + error_norms = np.array([error_m_norm, error_norm, error_p_norm]) + with np.errstate(divide='ignore'): + factors = error_norms ** (-1 / np.arange(order, order + 3)) + + delta_order = np.argmax(factors) - 1 + order += delta_order + self.order = order + + factor = min(MAX_FACTOR, safety * np.max(factors)) + self.h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + self.LU = None + + return True, None + + def _dense_output_impl(self): + return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction, + self.order, self.D[:self.order + 1].copy()) + + +class BdfDenseOutput(DenseOutput): + def __init__(self, t_old, t, h, order, D): + super().__init__(t_old, t) + self.order = order + self.t_shift = self.t - h * np.arange(self.order) + self.denom = h * (1 + np.arange(self.order)) + self.D = D + + def _call_impl(self, t): + if t.ndim == 0: + x = (t - self.t_shift) / self.denom + p = np.cumprod(x) + else: + x = (t - self.t_shift[:, None]) / self.denom[:, None] + p = np.cumprod(x, axis=0) + + y = np.dot(self.D[1:].T, p) + if y.ndim == 1: + y += self.D[0] + else: + y += self.D[0, :, None] + + return y diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py new file mode 100644 index 0000000000000000000000000000000000000000..4ff0b7056a0e7d117232b87fe9148bb23b7cf2ba --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py @@ -0,0 +1,451 @@ +from itertools import groupby +from warnings import warn +import numpy as np +from scipy.sparse import find, coo_matrix + + +EPS = np.finfo(float).eps + + +def validate_first_step(first_step, t0, t_bound): + """Assert that first_step is valid and return it.""" + if first_step <= 0: + raise ValueError("`first_step` must be positive.") + if first_step > np.abs(t_bound - t0): + raise ValueError("`first_step` exceeds bounds.") + return first_step + + +def validate_max_step(max_step): + """Assert that max_Step is valid and return it.""" + if max_step <= 0: + raise ValueError("`max_step` must be positive.") + return max_step + + +def warn_extraneous(extraneous): + """Display a warning for extraneous keyword arguments. + + The initializer of each solver class is expected to collect keyword + arguments that it doesn't understand and warn about them. This function + prints a warning for each key in the supplied dictionary. + + Parameters + ---------- + extraneous : dict + Extraneous keyword arguments + """ + if extraneous: + warn("The following arguments have no effect for a chosen solver: {}." + .format(", ".join(f"`{x}`" for x in extraneous)), + stacklevel=3) + + +def validate_tol(rtol, atol, n): + """Validate tolerance values.""" + + if np.any(rtol < 100 * EPS): + warn("At least one element of `rtol` is too small. 
" + f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.", + stacklevel=3) + rtol = np.maximum(rtol, 100 * EPS) + + atol = np.asarray(atol) + if atol.ndim > 0 and atol.shape != (n,): + raise ValueError("`atol` has wrong shape.") + + if np.any(atol < 0): + raise ValueError("`atol` must be positive.") + + return rtol, atol + + +def norm(x): + """Compute RMS norm.""" + return np.linalg.norm(x) / x.size ** 0.5 + + +def select_initial_step(fun, t0, y0, t_bound, + max_step, f0, direction, order, rtol, atol): + """Empirically select a good initial step. + + The algorithm is described in [1]_. + + Parameters + ---------- + fun : callable + Right-hand side of the system. + t0 : float + Initial value of the independent variable. + y0 : ndarray, shape (n,) + Initial value of the dependent variable. + t_bound : float + End-point of integration interval; used to ensure that t0+step<=tbound + and that fun is only evaluated in the interval [t0,tbound] + max_step : float + Maximum allowable step size. + f0 : ndarray, shape (n,) + Initial value of the derivative, i.e., ``fun(t0, y0)``. + direction : float + Integration direction. + order : float + Error estimator order. It means that the error controlled by the + algorithm is proportional to ``step_size ** (order + 1)`. + rtol : float + Desired relative tolerance. + atol : float + Desired absolute tolerance. + + Returns + ------- + h_abs : float + Absolute value of the suggested initial step. + + References + ---------- + .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations I: Nonstiff Problems", Sec. II.4. + """ + if y0.size == 0: + return np.inf + + interval_length = abs(t_bound - t0) + if interval_length == 0.0: + return 0.0 + + scale = atol + np.abs(y0) * rtol + d0 = norm(y0 / scale) + d1 = norm(f0 / scale) + if d0 < 1e-5 or d1 < 1e-5: + h0 = 1e-6 + else: + h0 = 0.01 * d0 / d1 + # Check t0+h0*direction doesn't take us beyond t_bound + h0 = min(h0, interval_length) + y1 = y0 + h0 * direction * f0 + f1 = fun(t0 + h0 * direction, y1) + d2 = norm((f1 - f0) / scale) / h0 + + if d1 <= 1e-15 and d2 <= 1e-15: + h1 = max(1e-6, h0 * 1e-3) + else: + h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1)) + + return min(100 * h0, h1, interval_length, max_step) + + +class OdeSolution: + """Continuous ODE solution. + + It is organized as a collection of `DenseOutput` objects which represent + local interpolants. It provides an algorithm to select a right interpolant + for each given point. + + The interpolants cover the range between `t_min` and `t_max` (see + Attributes below). Evaluation outside this interval is not forbidden, but + the accuracy is not guaranteed. + + When evaluating at a breakpoint (one of the values in `ts`) a segment with + the lower index is selected. + + Parameters + ---------- + ts : array_like, shape (n_segments + 1,) + Time instants between which local interpolants are defined. Must + be strictly increasing or decreasing (zero segment with two points is + also allowed). + interpolants : list of DenseOutput with n_segments elements + Local interpolants. An i-th interpolant is assumed to be defined + between ``ts[i]`` and ``ts[i + 1]``. + alt_segment : boolean + Requests the alternative interpolant segment selection scheme. At each + solver integration point, two interpolant segments are available. The + default (False) and alternative (True) behaviours select the segment + for which the requested time corresponded to ``t`` and ``t_old``, + respectively. 
This functionality is only relevant for testing the + interpolants' accuracy: different integrators use different + construction strategies. + + Attributes + ---------- + t_min, t_max : float + Time range of the interpolation. + """ + def __init__(self, ts, interpolants, alt_segment=False): + ts = np.asarray(ts) + d = np.diff(ts) + # The first case covers integration on zero segment. + if not ((ts.size == 2 and ts[0] == ts[-1]) + or np.all(d > 0) or np.all(d < 0)): + raise ValueError("`ts` must be strictly increasing or decreasing.") + + self.n_segments = len(interpolants) + if ts.shape != (self.n_segments + 1,): + raise ValueError("Numbers of time stamps and interpolants " + "don't match.") + + self.ts = ts + self.interpolants = interpolants + if ts[-1] >= ts[0]: + self.t_min = ts[0] + self.t_max = ts[-1] + self.ascending = True + self.side = "right" if alt_segment else "left" + self.ts_sorted = ts + else: + self.t_min = ts[-1] + self.t_max = ts[0] + self.ascending = False + self.side = "left" if alt_segment else "right" + self.ts_sorted = ts[::-1] + + def _call_single(self, t): + # Here we preserve a certain symmetry that when t is in self.ts, + # if alt_segment=False, then we prioritize a segment with a lower + # index. + ind = np.searchsorted(self.ts_sorted, t, side=self.side) + + segment = min(max(ind - 1, 0), self.n_segments - 1) + if not self.ascending: + segment = self.n_segments - 1 - segment + + return self.interpolants[segment](t) + + def __call__(self, t): + """Evaluate the solution. + + Parameters + ---------- + t : float or array_like with shape (n_points,) + Points to evaluate at. + + Returns + ------- + y : ndarray, shape (n_states,) or (n_states, n_points) + Computed values. Shape depends on whether `t` is a scalar or a + 1-D array. + """ + t = np.asarray(t) + + if t.ndim == 0: + return self._call_single(t) + + order = np.argsort(t) + reverse = np.empty_like(order) + reverse[order] = np.arange(order.shape[0]) + t_sorted = t[order] + + # See comment in self._call_single. + segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side) + segments -= 1 + segments[segments < 0] = 0 + segments[segments > self.n_segments - 1] = self.n_segments - 1 + if not self.ascending: + segments = self.n_segments - 1 - segments + + ys = [] + group_start = 0 + for segment, group in groupby(segments): + group_end = group_start + len(list(group)) + y = self.interpolants[segment](t_sorted[group_start:group_end]) + ys.append(y) + group_start = group_end + + ys = np.hstack(ys) + ys = ys[:, reverse] + + return ys + + +NUM_JAC_DIFF_REJECT = EPS ** 0.875 +NUM_JAC_DIFF_SMALL = EPS ** 0.75 +NUM_JAC_DIFF_BIG = EPS ** 0.25 +NUM_JAC_MIN_FACTOR = 1e3 * EPS +NUM_JAC_FACTOR_INCREASE = 10 +NUM_JAC_FACTOR_DECREASE = 0.1 + + +def num_jac(fun, t, y, f, threshold, factor, sparsity=None): + """Finite differences Jacobian approximation tailored for ODE solvers. + + This function computes finite difference approximation to the Jacobian + matrix of `fun` with respect to `y` using forward differences. + The Jacobian matrix has shape (n, n) and its element (i, j) is equal to + ``d f_i / d y_j``. + + A special feature of this function is the ability to correct the step + size from iteration to iteration. The main idea is to keep the finite + difference significantly separated from its round-off error which + approximately equals ``EPS * np.abs(f)``. 
This reduces the possibility of a huge error and assures that the estimated
+    derivatives are reasonably close to the true values (i.e., the finite
+    difference approximation at least qualitatively reflects the structure
+    of the true Jacobian).
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system implemented in a vectorized fashion.
+    t : float
+        Current time.
+    y : ndarray, shape (n,)
+        Current state.
+    f : ndarray, shape (n,)
+        Value of the right-hand side at (t, y).
+    threshold : float
+        Threshold for `y` value used for computing the step size as
+        ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value
+        of absolute tolerance (atol) for a solver should be passed as
+        `threshold`.
+    factor : ndarray with shape (n,) or None
+        Factor to use for computing the step size. Pass None for the very
+        first evaluation, then use the value returned from this function.
+    sparsity : tuple (structure, groups) or None
+        Sparsity structure of the Jacobian, `structure` must be csc_matrix.
+
+    Returns
+    -------
+    J : ndarray or csc_matrix, shape (n, n)
+        Jacobian matrix.
+    factor : ndarray, shape (n,)
+        Suggested `factor` for the next evaluation.
+    """
+    y = np.asarray(y)
+    n = y.shape[0]
+    if n == 0:
+        return np.empty((0, 0)), factor
+
+    if factor is None:
+        factor = np.full(n, EPS ** 0.5)
+    else:
+        factor = factor.copy()
+
+    # Direct the step as the ODE dictates, hoping that such a step won't
+    # lead to a problematic region. For complex ODEs it makes sense to use
+    # the real part of f as we use steps along the real axis.
+    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
+    y_scale = f_sign * np.maximum(threshold, np.abs(y))
+    h = (y + factor * y_scale) - y
+
+    # Make sure that the step is not 0 to start with. Not likely it will be
+    # executed often.
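+    # (Why h can round to exactly zero: with y[i] = 1.0 and
+    # factor[i] * y_scale[i] = 1e-20, the expression (1.0 + 1e-20) - 1.0
+    # evaluates to 0.0 in double precision, because the perturbation is
+    # smaller than the spacing between adjacent floats around y[i]. The
+    # loop below grows the factor tenfold until the step survives rounding.)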
+ for i in np.nonzero(h == 0)[0]: + while h[i] == 0: + factor[i] *= 10 + h[i] = (y[i] + factor[i] * y_scale[i]) - y[i] + + if sparsity is None: + return _dense_num_jac(fun, t, y, f, h, factor, y_scale) + else: + structure, groups = sparsity + return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, + structure, groups) + + +def _dense_num_jac(fun, t, y, f, h, factor, y_scale): + n = y.shape[0] + h_vecs = np.diag(h) + f_new = fun(t, y[:, None] + h_vecs) + diff = f_new - f[:, None] + max_ind = np.argmax(np.abs(diff), axis=0) + r = np.arange(n) + max_diff = np.abs(diff[max_ind, r]) + scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) + + diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale + if np.any(diff_too_small): + ind, = np.nonzero(diff_too_small) + new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] + h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] + h_vecs[ind, ind] = h_new + f_new = fun(t, y[:, None] + h_vecs[:, ind]) + diff_new = f_new - f[:, None] + max_ind = np.argmax(np.abs(diff_new), axis=0) + r = np.arange(ind.shape[0]) + max_diff_new = np.abs(diff_new[max_ind, r]) + scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) + + update = max_diff[ind] * scale_new < max_diff_new * scale[ind] + if np.any(update): + update, = np.nonzero(update) + update_ind = ind[update] + factor[update_ind] = new_factor[update] + h[update_ind] = h_new[update] + diff[:, update_ind] = diff_new[:, update] + scale[update_ind] = scale_new[update] + max_diff[update_ind] = max_diff_new[update] + + diff /= h + + factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE + factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE + factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) + + return diff, factor + + +def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups): + n = y.shape[0] + n_groups = np.max(groups) + 1 + h_vecs = np.empty((n_groups, n)) + for group in range(n_groups): + e = np.equal(group, groups) + h_vecs[group] = h * e + h_vecs = h_vecs.T + + f_new = fun(t, y[:, None] + h_vecs) + df = f_new - f[:, None] + + i, j, _ = find(structure) + diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc() + max_ind = np.array(abs(diff).argmax(axis=0)).ravel() + r = np.arange(n) + max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel() + scale = np.maximum(np.abs(f[max_ind]), + np.abs(f_new[max_ind, groups[r]])) + + diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale + if np.any(diff_too_small): + ind, = np.nonzero(diff_too_small) + new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] + h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] + h_new_all = np.zeros(n) + h_new_all[ind] = h_new + + groups_unique = np.unique(groups[ind]) + groups_map = np.empty(n_groups, dtype=int) + h_vecs = np.empty((groups_unique.shape[0], n)) + for k, group in enumerate(groups_unique): + e = np.equal(group, groups) + h_vecs[k] = h_new_all * e + groups_map[group] = k + h_vecs = h_vecs.T + + f_new = fun(t, y[:, None] + h_vecs) + df = f_new - f[:, None] + i, j, _ = find(structure[:, ind]) + diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]], + (i, j)), shape=(n, ind.shape[0])).tocsc() + + max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel() + r = np.arange(ind.shape[0]) + max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel() + scale_new = np.maximum( + np.abs(f[max_ind_new]), + np.abs(f_new[max_ind_new, groups_map[groups[ind]]])) + + update = max_diff[ind] * scale_new < max_diff_new * scale[ind] + if 
np.any(update): + update, = np.nonzero(update) + update_ind = ind[update] + factor[update_ind] = new_factor[update] + h[update_ind] = h_new[update] + diff[:, update_ind] = diff_new[:, update] + scale[update_ind] = scale_new[update] + max_diff[update_ind] = max_diff_new[update] + + diff.data /= np.repeat(h, np.diff(diff.indptr)) + + factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE + factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE + factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) + + return diff, factor diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py new file mode 100644 index 0000000000000000000000000000000000000000..f39f2f3650d321e2c475d4e220f9769139118a5e --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py @@ -0,0 +1,193 @@ +import numpy as np + +N_STAGES = 12 +N_STAGES_EXTENDED = 16 +INTERPOLATOR_POWER = 7 + +C = np.array([0.0, + 0.526001519587677318785587544488e-01, + 0.789002279381515978178381316732e-01, + 0.118350341907227396726757197510, + 0.281649658092772603273242802490, + 0.333333333333333333333333333333, + 0.25, + 0.307692307692307692307692307692, + 0.651282051282051282051282051282, + 0.6, + 0.857142857142857142857142857142, + 1.0, + 1.0, + 0.1, + 0.2, + 0.777777777777777777777777777778]) + +A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED)) +A[1, 0] = 5.26001519587677318785587544488e-2 + +A[2, 0] = 1.97250569845378994544595329183e-2 +A[2, 1] = 5.91751709536136983633785987549e-2 + +A[3, 0] = 2.95875854768068491816892993775e-2 +A[3, 2] = 8.87627564304205475450678981324e-2 + +A[4, 0] = 2.41365134159266685502369798665e-1 +A[4, 2] = -8.84549479328286085344864962717e-1 +A[4, 3] = 9.24834003261792003115737966543e-1 + +A[5, 0] = 3.7037037037037037037037037037e-2 +A[5, 3] = 1.70828608729473871279604482173e-1 +A[5, 4] = 1.25467687566822425016691814123e-1 + +A[6, 0] = 3.7109375e-2 +A[6, 3] = 1.70252211019544039314978060272e-1 +A[6, 4] = 6.02165389804559606850219397283e-2 +A[6, 5] = -1.7578125e-2 + +A[7, 0] = 3.70920001185047927108779319836e-2 +A[7, 3] = 1.70383925712239993810214054705e-1 +A[7, 4] = 1.07262030446373284651809199168e-1 +A[7, 5] = -1.53194377486244017527936158236e-2 +A[7, 6] = 8.27378916381402288758473766002e-3 + +A[8, 0] = 6.24110958716075717114429577812e-1 +A[8, 3] = -3.36089262944694129406857109825 +A[8, 4] = -8.68219346841726006818189891453e-1 +A[8, 5] = 2.75920996994467083049415600797e1 +A[8, 6] = 2.01540675504778934086186788979e1 +A[8, 7] = -4.34898841810699588477366255144e1 + +A[9, 0] = 4.77662536438264365890433908527e-1 +A[9, 3] = -2.48811461997166764192642586468 +A[9, 4] = -5.90290826836842996371446475743e-1 +A[9, 5] = 2.12300514481811942347288949897e1 +A[9, 6] = 1.52792336328824235832596922938e1 +A[9, 7] = -3.32882109689848629194453265587e1 +A[9, 8] = -2.03312017085086261358222928593e-2 + +A[10, 0] = -9.3714243008598732571704021658e-1 +A[10, 3] = 5.18637242884406370830023853209 +A[10, 4] = 1.09143734899672957818500254654 +A[10, 5] = -8.14978701074692612513997267357 +A[10, 6] = -1.85200656599969598641566180701e1 +A[10, 7] = 2.27394870993505042818970056734e1 +A[10, 8] = 2.49360555267965238987089396762 +A[10, 9] = -3.0467644718982195003823669022 + +A[11, 0] = 2.27331014751653820792359768449 +A[11, 3] = 
-1.05344954667372501984066689879e1 +A[11, 4] = -2.00087205822486249909675718444 +A[11, 5] = -1.79589318631187989172765950534e1 +A[11, 6] = 2.79488845294199600508499808837e1 +A[11, 7] = -2.85899827713502369474065508674 +A[11, 8] = -8.87285693353062954433549289258 +A[11, 9] = 1.23605671757943030647266201528e1 +A[11, 10] = 6.43392746015763530355970484046e-1 + +A[12, 0] = 5.42937341165687622380535766363e-2 +A[12, 5] = 4.45031289275240888144113950566 +A[12, 6] = 1.89151789931450038304281599044 +A[12, 7] = -5.8012039600105847814672114227 +A[12, 8] = 3.1116436695781989440891606237e-1 +A[12, 9] = -1.52160949662516078556178806805e-1 +A[12, 10] = 2.01365400804030348374776537501e-1 +A[12, 11] = 4.47106157277725905176885569043e-2 + +A[13, 0] = 5.61675022830479523392909219681e-2 +A[13, 6] = 2.53500210216624811088794765333e-1 +A[13, 7] = -2.46239037470802489917441475441e-1 +A[13, 8] = -1.24191423263816360469010140626e-1 +A[13, 9] = 1.5329179827876569731206322685e-1 +A[13, 10] = 8.20105229563468988491666602057e-3 +A[13, 11] = 7.56789766054569976138603589584e-3 +A[13, 12] = -8.298e-3 + +A[14, 0] = 3.18346481635021405060768473261e-2 +A[14, 5] = 2.83009096723667755288322961402e-2 +A[14, 6] = 5.35419883074385676223797384372e-2 +A[14, 7] = -5.49237485713909884646569340306e-2 +A[14, 10] = -1.08347328697249322858509316994e-4 +A[14, 11] = 3.82571090835658412954920192323e-4 +A[14, 12] = -3.40465008687404560802977114492e-4 +A[14, 13] = 1.41312443674632500278074618366e-1 + +A[15, 0] = -4.28896301583791923408573538692e-1 +A[15, 5] = -4.69762141536116384314449447206 +A[15, 6] = 7.68342119606259904184240953878 +A[15, 7] = 4.06898981839711007970213554331 +A[15, 8] = 3.56727187455281109270669543021e-1 +A[15, 12] = -1.39902416515901462129418009734e-3 +A[15, 13] = 2.9475147891527723389556272149 +A[15, 14] = -9.15095847217987001081870187138 + + +B = A[N_STAGES, :N_STAGES] + +E3 = np.zeros(N_STAGES + 1) +E3[:-1] = B.copy() +E3[0] -= 0.244094488188976377952755905512 +E3[8] -= 0.733846688281611857341361741547 +E3[11] -= 0.220588235294117647058823529412e-1 + +E5 = np.zeros(N_STAGES + 1) +E5[0] = 0.1312004499419488073250102996e-1 +E5[5] = -0.1225156446376204440720569753e+1 +E5[6] = -0.4957589496572501915214079952 +E5[7] = 0.1664377182454986536961530415e+1 +E5[8] = -0.3503288487499736816886487290 +E5[9] = 0.3341791187130174790297318841 +E5[10] = 0.8192320648511571246570742613e-1 +E5[11] = -0.2235530786388629525884427845e-1 + +# First 3 coefficients are computed separately. 
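+# (Context, stated as an assumption about the companion Runge-Kutta
+# implementation rather than anything defined in this file: the DOP853
+# dense-output polynomial has INTERPOLATOR_POWER = 7 coefficient rows; the
+# first three follow directly from y_old, y_new and the endpoint
+# derivatives, so only the remaining four rows need the tabulated weights
+# D below.)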
+D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED)) +D[0, 0] = -0.84289382761090128651353491142e+1 +D[0, 5] = 0.56671495351937776962531783590 +D[0, 6] = -0.30689499459498916912797304727e+1 +D[0, 7] = 0.23846676565120698287728149680e+1 +D[0, 8] = 0.21170345824450282767155149946e+1 +D[0, 9] = -0.87139158377797299206789907490 +D[0, 10] = 0.22404374302607882758541771650e+1 +D[0, 11] = 0.63157877876946881815570249290 +D[0, 12] = -0.88990336451333310820698117400e-1 +D[0, 13] = 0.18148505520854727256656404962e+2 +D[0, 14] = -0.91946323924783554000451984436e+1 +D[0, 15] = -0.44360363875948939664310572000e+1 + +D[1, 0] = 0.10427508642579134603413151009e+2 +D[1, 5] = 0.24228349177525818288430175319e+3 +D[1, 6] = 0.16520045171727028198505394887e+3 +D[1, 7] = -0.37454675472269020279518312152e+3 +D[1, 8] = -0.22113666853125306036270938578e+2 +D[1, 9] = 0.77334326684722638389603898808e+1 +D[1, 10] = -0.30674084731089398182061213626e+2 +D[1, 11] = -0.93321305264302278729567221706e+1 +D[1, 12] = 0.15697238121770843886131091075e+2 +D[1, 13] = -0.31139403219565177677282850411e+2 +D[1, 14] = -0.93529243588444783865713862664e+1 +D[1, 15] = 0.35816841486394083752465898540e+2 + +D[2, 0] = 0.19985053242002433820987653617e+2 +D[2, 5] = -0.38703730874935176555105901742e+3 +D[2, 6] = -0.18917813819516756882830838328e+3 +D[2, 7] = 0.52780815920542364900561016686e+3 +D[2, 8] = -0.11573902539959630126141871134e+2 +D[2, 9] = 0.68812326946963000169666922661e+1 +D[2, 10] = -0.10006050966910838403183860980e+1 +D[2, 11] = 0.77771377980534432092869265740 +D[2, 12] = -0.27782057523535084065932004339e+1 +D[2, 13] = -0.60196695231264120758267380846e+2 +D[2, 14] = 0.84320405506677161018159903784e+2 +D[2, 15] = 0.11992291136182789328035130030e+2 + +D[3, 0] = -0.25693933462703749003312586129e+2 +D[3, 5] = -0.15418974869023643374053993627e+3 +D[3, 6] = -0.23152937917604549567536039109e+3 +D[3, 7] = 0.35763911791061412378285349910e+3 +D[3, 8] = 0.93405324183624310003907691704e+2 +D[3, 9] = -0.37458323136451633156875139351e+2 +D[3, 10] = 0.10409964950896230045147246184e+3 +D[3, 11] = 0.29840293426660503123344363579e+2 +D[3, 12] = -0.43533456590011143754432175058e+2 +D[3, 13] = 0.96324553959188282948394950600e+2 +D[3, 14] = -0.39177261675615439165231486172e+2 +D[3, 15] = -0.14972683625798562581422125276e+3 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py new file mode 100644 index 0000000000000000000000000000000000000000..13d4732bd644832857d31fde5cf33e2b169051e6 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py @@ -0,0 +1,748 @@ +import inspect +import numpy as np +from .bdf import BDF +from .radau import Radau +from .rk import RK23, RK45, DOP853 +from .lsoda import LSODA +from scipy.optimize import OptimizeResult +from .common import EPS, OdeSolution +from .base import OdeSolver + + +METHODS = {'RK23': RK23, + 'RK45': RK45, + 'DOP853': DOP853, + 'Radau': Radau, + 'BDF': BDF, + 'LSODA': LSODA} + + +MESSAGES = {0: "The solver successfully reached the end of the integration interval.", + 1: "A termination event occurred."} + + +class OdeResult(OptimizeResult): + pass + + +def prepare_events(events): + """Standardize event functions and extract attributes.""" + if callable(events): + events = (events,) + + max_events = np.empty(len(events)) + direction = np.empty(len(events)) + for 
i, event in enumerate(events): + terminal = getattr(event, 'terminal', None) + direction[i] = getattr(event, 'direction', 0) + + message = ('The `terminal` attribute of each event ' + 'must be a boolean or positive integer.') + if terminal is None or terminal == 0: + max_events[i] = np.inf + elif int(terminal) == terminal and terminal > 0: + max_events[i] = terminal + else: + raise ValueError(message) + + return events, max_events, direction + + +def solve_event_equation(event, sol, t_old, t): + """Solve an equation corresponding to an ODE event. + + The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an + ODE solver using some sort of interpolation. It is solved by + `scipy.optimize.brentq` with xtol=atol=4*EPS. + + Parameters + ---------- + event : callable + Function ``event(t, y)``. + sol : callable + Function ``sol(t)`` which evaluates an ODE solution between `t_old` + and `t`. + t_old, t : float + Previous and new values of time. They will be used as a bracketing + interval. + + Returns + ------- + root : float + Found solution. + """ + from scipy.optimize import brentq + return brentq(lambda t: event(t, sol(t)), t_old, t, + xtol=4 * EPS, rtol=4 * EPS) + + +def handle_events(sol, events, active_events, event_count, max_events, + t_old, t): + """Helper function to handle events. + + Parameters + ---------- + sol : DenseOutput + Function ``sol(t)`` which evaluates an ODE solution between `t_old` + and `t`. + events : list of callables, length n_events + Event functions with signatures ``event(t, y)``. + active_events : ndarray + Indices of events which occurred. + event_count : ndarray + Current number of occurrences for each event. + max_events : ndarray, shape (n_events,) + Number of occurrences allowed for each event before integration + termination is issued. + t_old, t : float + Previous and new values of time. + + Returns + ------- + root_indices : ndarray + Indices of events which take zero between `t_old` and `t` and before + a possible termination. + roots : ndarray + Values of t at which events occurred. + terminate : bool + Whether a terminal event occurred. + """ + roots = [solve_event_equation(events[event_index], sol, t_old, t) + for event_index in active_events] + + roots = np.asarray(roots) + + if np.any(event_count[active_events] >= max_events[active_events]): + if t > t_old: + order = np.argsort(roots) + else: + order = np.argsort(-roots) + active_events = active_events[order] + roots = roots[order] + t = np.nonzero(event_count[active_events] + >= max_events[active_events])[0][0] + active_events = active_events[:t + 1] + roots = roots[:t + 1] + terminate = True + else: + terminate = False + + return active_events, roots, terminate + + +def find_active_events(g, g_new, direction): + """Find which event occurred during an integration step. + + Parameters + ---------- + g, g_new : array_like, shape (n_events,) + Values of event functions at a current and next points. + direction : ndarray, shape (n_events,) + Event "direction" according to the definition in `solve_ivp`. + + Returns + ------- + active_events : ndarray + Indices of events which occurred during the step. 
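+
+    Examples
+    --------
+    An illustrative call (this is a private helper; the example is only
+    meant to make the sign-change logic concrete):
+
+    >>> import numpy as np
+    >>> find_active_events(np.array([-1.0, 1.0]), np.array([1.0, -1.0]),
+    ...                    np.array([0, -1]))
+    array([0, 1])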
+ """ + g, g_new = np.asarray(g), np.asarray(g_new) + up = (g <= 0) & (g_new >= 0) + down = (g >= 0) & (g_new <= 0) + either = up | down + mask = (up & (direction > 0) | + down & (direction < 0) | + either & (direction == 0)) + + return np.nonzero(mask)[0] + + +def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False, + events=None, vectorized=False, args=None, **options): + """Solve an initial value problem for a system of ODEs. + + This function numerically integrates a system of ordinary differential + equations given an initial value:: + + dy / dt = f(t, y) + y(t0) = y0 + + Here t is a 1-D independent variable (time), y(t) is an + N-D vector-valued function (state), and an N-D + vector-valued function f(t, y) determines the differential equations. + The goal is to find y(t) approximately satisfying the differential + equations, given an initial value y(t0)=y0. + + Some of the solvers support integration in the complex domain, but note + that for stiff ODE solvers, the right-hand side must be + complex-differentiable (satisfy Cauchy-Riemann equations [11]_). + To solve a problem in the complex domain, pass y0 with a complex data type. + Another option always available is to rewrite your problem for real and + imaginary parts separately. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. Additional + arguments need to be passed if ``args`` is used (see documentation of + ``args`` argument). ``fun`` must return an array of the same shape as + ``y``. See `vectorized` for more information. + t_span : 2-member sequence + Interval of integration (t0, tf). The solver starts with t=t0 and + integrates until it reaches t=tf. Both t0 and tf must be floats + or values interpretable by the float conversion function. + y0 : array_like, shape (n,) + Initial state. For problems in the complex domain, pass `y0` with a + complex data type (even if the initial value is purely real). + method : string or `OdeSolver`, optional + Integration method to use: + + * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_. + The error is controlled assuming accuracy of the fourth-order + method, but steps are taken using the fifth-order accurate + formula (local extrapolation is done). A quartic interpolation + polynomial is used for the dense output [2]_. Can be applied in + the complex domain. + * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error + is controlled assuming accuracy of the second-order method, but + steps are taken using the third-order accurate formula (local + extrapolation is done). A cubic Hermite polynomial is used for the + dense output. Can be applied in the complex domain. + * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_. + Python implementation of the "DOP853" algorithm originally + written in Fortran [14]_. A 7-th order interpolation polynomial + accurate to 7-th order is used for the dense output. + Can be applied in the complex domain. + * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of + order 5 [4]_. The error is controlled with a third-order accurate + embedded formula. A cubic polynomial which satisfies the + collocation conditions is used for the dense output. + * 'BDF': Implicit multi-step variable-order (1 to 5) method based + on a backward differentiation formula for the derivative + approximation [5]_. 
The implementation follows the one described + in [6]_. A quasi-constant step scheme is used and accuracy is + enhanced using the NDF modification. Can be applied in the + complex domain. + * 'LSODA': Adams/BDF method with automatic stiffness detection and + switching [7]_, [8]_. This is a wrapper of the Fortran solver + from ODEPACK. + + Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used + for non-stiff problems and implicit methods ('Radau', 'BDF') for + stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended + for solving with high precision (low values of `rtol` and `atol`). + + If not sure, first try to run 'RK45'. If it makes unusually many + iterations, diverges, or fails, your problem is likely to be stiff and + you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal + choice, but it might be somewhat less convenient to work with as it + wraps old Fortran code. + + You can also pass an arbitrary class derived from `OdeSolver` which + implements the solver. + t_eval : array_like or None, optional + Times at which to store the computed solution, must be sorted and lie + within `t_span`. If None (default), use points selected by the solver. + dense_output : bool, optional + Whether to compute a continuous solution. Default is False. + events : callable, or list of callables, optional + Events to track. If None (default), no events will be tracked. + Each event occurs at the zeros of a continuous function of time and + state. Each function must have the signature ``event(t, y)`` where + additional argument have to be passed if ``args`` is used (see + documentation of ``args`` argument). Each function must return a + float. The solver will find an accurate value of `t` at which + ``event(t, y(t)) = 0`` using a root-finding algorithm. By default, + all zeros will be found. The solver looks for a sign change over + each step, so if multiple zero crossings occur within one step, + events may be missed. Additionally each `event` function might + have the following attributes: + + terminal: bool or int, optional + When boolean, whether to terminate integration if this event occurs. + When integral, termination occurs after the specified the number of + occurences of this event. + Implicitly False if not assigned. + direction: float, optional + Direction of a zero crossing. If `direction` is positive, + `event` will only trigger when going from negative to positive, + and vice versa if `direction` is negative. If 0, then either + direction will trigger event. Implicitly 0 if not assigned. + + You can assign attributes like ``event.terminal = True`` to any + function in Python. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for other methods and for 'Radau' and + 'BDF' in some circumstances (e.g. small ``len(y0)``). + args : tuple, optional + Additional arguments to pass to the user-defined functions. 
If given, + the additional arguments are passed to all user-defined functions. + So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``, + then `jac` (if given) and any event functions must have the same + signature, and `args` must be a tuple of length 3. + **options + Options passed to a chosen solver. All options available for already + implemented solvers are listed below. + first_step : float or None, optional + Initial step size. Default is `None` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float or array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : array_like, sparse_matrix, callable or None, optional + Jacobian matrix of the right-hand side of the system with respect + to y, required by the 'Radau', 'BDF' and 'LSODA' method. The + Jacobian matrix has shape (n, n) and its element (i, j) is equal to + ``d f_i / d y_j``. There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. Not supported by 'LSODA'. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)``, as necessary. + Additional arguments have to be passed if ``args`` is + used (see documentation of ``args`` argument). + For 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : array_like, sparse matrix or None, optional + Defines a sparsity structure of the Jacobian matrix for a finite- + difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few + non-zero elements in *each* row, providing the sparsity structure + will greatly speed up the computations [10]_. A zero entry means that + a corresponding element in the Jacobian is always zero. If None + (default), the Jacobian is assumed to be dense. + Not supported by 'LSODA', see `lband` and `uband` instead. + lband, uband : int or None, optional + Parameters defining the bandwidth of the Jacobian for the 'LSODA' + method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. + Default is None. Setting these requires your jac routine to return the + Jacobian in the packed format: the returned array must have ``n`` + columns and ``uband + lband + 1`` rows in which Jacobian diagonals are + written. 
Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``. + The same format is used in `scipy.linalg.solve_banded` (check for an + illustration). These parameters can be also used with ``jac=None`` to + reduce the number of Jacobian elements estimated by finite differences. + min_step : float, optional + The minimum allowed step size for 'LSODA' method. + By default `min_step` is zero. + + Returns + ------- + Bunch object with the following fields defined: + t : ndarray, shape (n_points,) + Time points. + y : ndarray, shape (n, n_points) + Values of the solution at `t`. + sol : `OdeSolution` or None + Found solution as `OdeSolution` instance; None if `dense_output` was + set to False. + t_events : list of ndarray or None + Contains for each event type a list of arrays at which an event of + that type event was detected. None if `events` was None. + y_events : list of ndarray or None + For each value of `t_events`, the corresponding value of the solution. + None if `events` was None. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + status : int + Reason for algorithm termination: + + * -1: Integration step failed. + * 0: The solver successfully reached the end of `tspan`. + * 1: A termination event occurred. + + message : string + Human-readable description of the termination reason. + success : bool + True if the solver reached the interval end or a termination event + occurred (``status >= 0``). + + References + ---------- + .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta + formulae", Journal of Computational and Applied Mathematics, Vol. 6, + No. 1, pp. 19-26, 1980. + .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics + of Computation,, Vol. 46, No. 173, pp. 135-150, 1986. + .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas", + Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989. + .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II: + Stiff and Differential-Algebraic Problems", Sec. IV.8. + .. [5] `Backward Differentiation Formula + `_ + on Wikipedia. + .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. + COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. + .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE + Solvers," IMACS Transactions on Scientific Computation, Vol 1., + pp. 55-64, 1983. + .. [8] L. Petzold, "Automatic selection of methods for solving stiff and + nonstiff systems of ordinary differential equations", SIAM Journal + on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148, + 1983. + .. [9] `Stiff equation `_ on + Wikipedia. + .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + .. [11] `Cauchy-Riemann equations + `_ on + Wikipedia. + .. [12] `Lotka-Volterra equations + `_ + on Wikipedia. + .. [13] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations I: Nonstiff Problems", Sec. II. + .. [14] `Page with original Fortran code of DOP853 + `_. + + Examples + -------- + Basic exponential decay showing automatically chosen time points. + + >>> import numpy as np + >>> from scipy.integrate import solve_ivp + >>> def exponential_decay(t, y): return -0.5 * y + >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8]) + >>> print(sol.t) + [ 0. 
0.11487653 1.26364188 3.06061781 4.81611105 6.57445806 + 8.33328988 10. ] + >>> print(sol.y) + [[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045 + 0.03107158 0.01350781] + [4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091 + 0.06214316 0.02701561] + [8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181 + 0.12428631 0.05403123]] + + Specifying points where the solution is desired. + + >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8], + ... t_eval=[0, 1, 2, 4, 10]) + >>> print(sol.t) + [ 0 1 2 4 10] + >>> print(sol.y) + [[2. 1.21305369 0.73534021 0.27066736 0.01350938] + [4. 2.42610739 1.47068043 0.54133472 0.02701876] + [8. 4.85221478 2.94136085 1.08266944 0.05403753]] + + Cannon fired upward with terminal event upon impact. The ``terminal`` and + ``direction`` fields of an event are applied by monkey patching a function. + Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts + at position 0 with velocity +10. Note that the integration never reaches + t=100 because the event is terminal. + + >>> def upward_cannon(t, y): return [y[1], -0.5] + >>> def hit_ground(t, y): return y[0] + >>> hit_ground.terminal = True + >>> hit_ground.direction = -1 + >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground) + >>> print(sol.t_events) + [array([40.])] + >>> print(sol.t) + [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02 + 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01] + + Use `dense_output` and `events` to find position, which is 100, at the apex + of the cannonball's trajectory. Apex is not defined as terminal, so both + apex and hit_ground are found. There is no information at t=20, so the sol + attribute is used to evaluate the solution. The sol attribute is returned + by setting ``dense_output=True``. Alternatively, the `y_events` attribute + can be used to access the solution at the time of the event. + + >>> def apex(t, y): return y[1] + >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], + ... events=(hit_ground, apex), dense_output=True) + >>> print(sol.t_events) + [array([40.]), array([20.])] + >>> print(sol.t) + [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02 + 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01] + >>> print(sol.sol(sol.t_events[1][0])) + [100. 0.] + >>> print(sol.y_events) + [array([[-5.68434189e-14, -1.00000000e+01]]), + array([[1.00000000e+02, 1.77635684e-15]])] + + As an example of a system with additional parameters, we'll implement + the Lotka-Volterra equations [12]_. + + >>> def lotkavolterra(t, z, a, b, c, d): + ... x, y = z + ... return [a*x - b*x*y, -c*y + d*x*y] + ... + + We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args` + argument. + + >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1), + ... dense_output=True) + + Compute a dense solution and plot it. + + >>> t = np.linspace(0, 15, 300) + >>> z = sol.sol(t) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, z.T) + >>> plt.xlabel('t') + >>> plt.legend(['x', 'y'], shadow=True) + >>> plt.title('Lotka-Volterra System') + >>> plt.show() + + A couple examples of using solve_ivp to solve the differential + equation ``y' = Ay`` with complex matrix ``A``. + + >>> A = np.array([[-0.25 + 0.14j, 0, 0.33 + 0.44j], + ... [0.25 + 0.58j, -0.2 + 0.14j, 0], + ... [0, 0.2 + 0.4j, -0.1 + 0.97j]]) + + Solving an IVP with ``A`` from above and ``y`` as 3x1 vector: + + >>> def deriv_vec(t, y): + ... return A @ y + >>> result = solve_ivp(deriv_vec, [0, 25], + ... 
np.array([10 + 0j, 20 + 0j, 30 + 0j]), + ... t_eval=np.linspace(0, 25, 101)) + >>> print(result.y[:, 0]) + [10.+0.j 20.+0.j 30.+0.j] + >>> print(result.y[:, -1]) + [18.46291039+45.25653651j 10.01569306+36.23293216j + -4.98662741+80.07360388j] + + Solving an IVP with ``A`` from above with ``y`` as 3x3 matrix : + + >>> def deriv_mat(t, y): + ... return (A @ y.reshape(3, 3)).flatten() + >>> y0 = np.array([[2 + 0j, 3 + 0j, 4 + 0j], + ... [5 + 0j, 6 + 0j, 7 + 0j], + ... [9 + 0j, 34 + 0j, 78 + 0j]]) + + >>> result = solve_ivp(deriv_mat, [0, 25], y0.flatten(), + ... t_eval=np.linspace(0, 25, 101)) + >>> print(result.y[:, 0].reshape(3, 3)) + [[ 2.+0.j 3.+0.j 4.+0.j] + [ 5.+0.j 6.+0.j 7.+0.j] + [ 9.+0.j 34.+0.j 78.+0.j]] + >>> print(result.y[:, -1].reshape(3, 3)) + [[ 5.67451179 +12.07938445j 17.2888073 +31.03278837j + 37.83405768 +63.25138759j] + [ 3.39949503 +11.82123994j 21.32530996 +44.88668871j + 53.17531184+103.80400411j] + [ -2.26105874 +22.19277664j -15.1255713 +70.19616341j + -38.34616845+153.29039931j]] + + + """ + if method not in METHODS and not ( + inspect.isclass(method) and issubclass(method, OdeSolver)): + raise ValueError(f"`method` must be one of {METHODS} or OdeSolver class.") + + t0, tf = map(float, t_span) + + if args is not None: + # Wrap the user's fun (and jac, if given) in lambdas to hide the + # additional parameters. Pass in the original fun as a keyword + # argument to keep it in the scope of the lambda. + try: + _ = [*(args)] + except TypeError as exp: + suggestion_tuple = ( + "Supplied 'args' cannot be unpacked. Please supply `args`" + f" as a tuple (e.g. `args=({args},)`)" + ) + raise TypeError(suggestion_tuple) from exp + + def fun(t, x, fun=fun): + return fun(t, x, *args) + jac = options.get('jac') + if callable(jac): + options['jac'] = lambda t, x: jac(t, x, *args) + + if t_eval is not None: + t_eval = np.asarray(t_eval) + if t_eval.ndim != 1: + raise ValueError("`t_eval` must be 1-dimensional.") + + if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)): + raise ValueError("Values in `t_eval` are not within `t_span`.") + + d = np.diff(t_eval) + if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0): + raise ValueError("Values in `t_eval` are not properly sorted.") + + if tf > t0: + t_eval_i = 0 + else: + # Make order of t_eval decreasing to use np.searchsorted. + t_eval = t_eval[::-1] + # This will be an upper bound for slices. + t_eval_i = t_eval.shape[0] + + if method in METHODS: + method = METHODS[method] + + solver = method(fun, t0, y0, tf, vectorized=vectorized, **options) + + if t_eval is None: + ts = [t0] + ys = [y0] + elif t_eval is not None and dense_output: + ts = [] + ti = [t0] + ys = [] + else: + ts = [] + ys = [] + + interpolants = [] + + if events is not None: + events, max_events, event_dir = prepare_events(events) + event_count = np.zeros(len(events)) + if args is not None: + # Wrap user functions in lambdas to hide the additional parameters. + # The original event function is passed as a keyword argument to the + # lambda to keep the original function in scope (i.e., avoid the + # late binding closure "gotcha"). 
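+            # (Illustration of the gotcha being avoided: written as
+            #     events = [lambda t, x: event(t, x, *args) for event in events]
+            # each lambda would look up ``event`` only when called, after the
+            # loop has finished, so every wrapper would dispatch to the last
+            # event function. The ``event=event`` default argument freezes
+            # the current value for each wrapper instead.)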
+ events = [lambda t, x, event=event: event(t, x, *args) + for event in events] + g = [event(t0, y0) for event in events] + t_events = [[] for _ in range(len(events))] + y_events = [[] for _ in range(len(events))] + else: + t_events = None + y_events = None + + status = None + while status is None: + message = solver.step() + + if solver.status == 'finished': + status = 0 + elif solver.status == 'failed': + status = -1 + break + + t_old = solver.t_old + t = solver.t + y = solver.y + + if dense_output: + sol = solver.dense_output() + interpolants.append(sol) + else: + sol = None + + if events is not None: + g_new = [event(t, y) for event in events] + active_events = find_active_events(g, g_new, event_dir) + if active_events.size > 0: + if sol is None: + sol = solver.dense_output() + + event_count[active_events] += 1 + root_indices, roots, terminate = handle_events( + sol, events, active_events, event_count, max_events, + t_old, t) + + for e, te in zip(root_indices, roots): + t_events[e].append(te) + y_events[e].append(sol(te)) + + if terminate: + status = 1 + t = roots[-1] + y = sol(t) + + g = g_new + + if t_eval is None: + ts.append(t) + ys.append(y) + else: + # The value in t_eval equal to t will be included. + if solver.direction > 0: + t_eval_i_new = np.searchsorted(t_eval, t, side='right') + t_eval_step = t_eval[t_eval_i:t_eval_i_new] + else: + t_eval_i_new = np.searchsorted(t_eval, t, side='left') + # It has to be done with two slice operations, because + # you can't slice to 0th element inclusive using backward + # slicing. + t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1] + + if t_eval_step.size > 0: + if sol is None: + sol = solver.dense_output() + ts.append(t_eval_step) + ys.append(sol(t_eval_step)) + t_eval_i = t_eval_i_new + + if t_eval is not None and dense_output: + ti.append(t) + + message = MESSAGES.get(status, message) + + if t_events is not None: + t_events = [np.asarray(te) for te in t_events] + y_events = [np.asarray(ye) for ye in y_events] + + if t_eval is None: + ts = np.array(ts) + ys = np.vstack(ys).T + elif ts: + ts = np.hstack(ts) + ys = np.hstack(ys) + + if dense_output: + if t_eval is None: + sol = OdeSolution( + ts, interpolants, alt_segment=True if method in [BDF, LSODA] else False + ) + else: + sol = OdeSolution( + ti, interpolants, alt_segment=True if method in [BDF, LSODA] else False + ) + else: + sol = None + + return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events, + nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu, + status=status, message=message, success=status >= 0) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py new file mode 100644 index 0000000000000000000000000000000000000000..2a5a7c530c04eddc9beff44e2d4f6df439d5ef01 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py @@ -0,0 +1,224 @@ +import numpy as np +from scipy.integrate import ode +from .common import validate_tol, validate_first_step, warn_extraneous +from .base import OdeSolver, DenseOutput + + +class LSODA(OdeSolver): + """Adams/BDF method with automatic stiffness detection and switching. + + This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches + automatically between the nonstiff Adams method and the stiff BDF method. + The method was originally detailed in [2]_. 
+ + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + min_step : float, optional + Minimum allowed step size. Default is 0.0, i.e., the step size is not + bounded and determined solely by the solver. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : None or callable, optional + Jacobian matrix of the right-hand side of the system with respect to + ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is + equal to ``d f_i / d y_j``. The function will be called as + ``jac(t, y)``. If None (default), the Jacobian will be + approximated by finite differences. It is generally recommended to + provide the Jacobian rather than relying on a finite-difference + approximation. + lband, uband : int or None + Parameters defining the bandwidth of the Jacobian, + i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting + these requires your jac routine to return the Jacobian in the packed format: + the returned array must have ``n`` columns and ``uband + lband + 1`` + rows in which Jacobian diagonals are written. Specifically + ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used + in `scipy.linalg.solve_banded` (check for an illustration). + These parameters can be also used with ``jac=None`` to reduce the + number of Jacobian elements estimated by finite differences. + vectorized : bool, optional + Whether `fun` may be called in a vectorized fashion. False (default) + is recommended for this solver. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. 
each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for this solver. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + + References + ---------- + .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE + Solvers," IMACS Transactions on Scientific Computation, Vol 1., + pp. 55-64, 1983. + .. [2] L. Petzold, "Automatic selection of methods for solving stiff and + nonstiff systems of ordinary differential equations", SIAM Journal + on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148, + 1983. + """ + def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0, + max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None, + uband=None, vectorized=False, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized) + + if first_step is None: + first_step = 0 # LSODA value for automatic selection. + else: + first_step = validate_first_step(first_step, t0, t_bound) + + first_step *= self.direction + + if max_step == np.inf: + max_step = 0 # LSODA value for infinity. + elif max_step <= 0: + raise ValueError("`max_step` must be positive.") + + if min_step < 0: + raise ValueError("`min_step` must be nonnegative.") + + rtol, atol = validate_tol(rtol, atol, self.n) + + solver = ode(self.fun, jac) + solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step, + min_step=min_step, first_step=first_step, + lband=lband, uband=uband) + solver.set_initial_value(y0, t0) + + # Inject t_bound into rwork array as needed for itask=5. + solver._integrator.rwork[0] = self.t_bound + solver._integrator.call_args[4] = solver._integrator.rwork + + self._lsoda_solver = solver + + def _step_impl(self): + solver = self._lsoda_solver + integrator = solver._integrator + + # From lsoda.step and lsoda.integrate itask=5 means take a single + # step and do not go past t_bound. + itask = integrator.call_args[2] + integrator.call_args[2] = 5 + solver._y, solver.t = integrator.run( + solver.f, solver.jac or (lambda: None), solver._y, solver.t, + self.t_bound, solver.f_params, solver.jac_params) + integrator.call_args[2] = itask + + if solver.successful(): + self.t = solver.t + self.y = solver._y + # From LSODA Fortran source njev is equal to nlu. + self.njev = integrator.iwork[12] + self.nlu = integrator.iwork[12] + return True, None + else: + return False, 'Unexpected istate in LSODA.' + + def _dense_output_impl(self): + iwork = self._lsoda_solver._integrator.iwork + rwork = self._lsoda_solver._integrator.rwork + + # We want to produce the Nordsieck history array, yh, up to the order + # used in the last successful iteration. The step size is unimportant + # because it will be scaled out in LsodaDenseOutput. Some additional + # work may be required because ODEPACK's LSODA implementation produces + # the Nordsieck history in the state needed for the next iteration. 
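+        # (Column k of the Nordsieck array holds the scaled derivative
+        # h**k * y^(k) / k!, so LsodaDenseOutput below can evaluate the
+        # interpolant as a polynomial in the scaled offset (t - t_n) / h
+        # measured from the step endpoint t_n.)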
+ + # iwork[13] contains order from last successful iteration, while + # iwork[14] contains order to be attempted next. + order = iwork[13] + + # rwork[11] contains the step size to be attempted next, while + # rwork[10] contains step size from last successful iteration. + h = rwork[11] + + # rwork[20:20 + (iwork[14] + 1) * self.n] contains entries of the + # Nordsieck array in state needed for next iteration. We want + # the entries up to order for the last successful step so use the + # following. + yh = np.reshape(rwork[20:20 + (order + 1) * self.n], + (self.n, order + 1), order='F').copy() + if iwork[14] < order: + # If the order is set to decrease then the final column of yh + # has not been updated within ODEPACK's LSODA + # implementation because this column will not be used in the + # next iteration. We must rescale this column to make the + # associated step size consistent with the other columns. + yh[:, -1] *= (h / rwork[10]) ** order + + return LsodaDenseOutput(self.t_old, self.t, h, order, yh) + + +class LsodaDenseOutput(DenseOutput): + def __init__(self, t_old, t, h, order, yh): + super().__init__(t_old, t) + self.h = h + self.yh = yh + self.p = np.arange(order + 1) + + def _call_impl(self, t): + if t.ndim == 0: + x = ((t - self.t) / self.h) ** self.p + else: + x = ((t - self.t) / self.h) ** self.p[:, None] + + return np.dot(self.yh, x) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py new file mode 100644 index 0000000000000000000000000000000000000000..e13cb0f14c3c3e1102b828d4255609ab41d8d2a2 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py @@ -0,0 +1,574 @@ +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import csc_matrix, issparse, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, num_jac, EPS, warn_extraneous, + validate_first_step) +from .base import OdeSolver, DenseOutput + +S6 = 6 ** 0.5 + +# Butcher tableau. A is not used directly, see below. +C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1]) +E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3 + +# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue +# and a complex conjugate pair. They are written below. +MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3) +MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3)) + - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6))) + +# These are transformation matrices. +T = np.array([ + [0.09443876248897524, -0.14125529502095421, 0.03002919410514742], + [0.25021312296533332, 0.20412935229379994, -0.38294211275726192], + [1, 1, 0]]) +TI = np.array([ + [4.17871859155190428, 0.32768282076106237, 0.52337644549944951], + [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044], + [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]]) +# These linear combinations are used in the algorithm. +TI_REAL = TI[0] +TI_COMPLEX = TI[1] + 1j * TI[2] + +# Interpolator coefficients. +P = np.array([ + [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6], + [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6], + [1/3, -8/3, 10/3]]) + + +NEWTON_MAXITER = 6 # Maximum number of Newton iterations. +MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size. 
+MAX_FACTOR = 10  # Maximum allowed increase in a step size.
+
+
+def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
+                             LU_real, LU_complex, solve_lu):
+    """Solve the collocation system.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system.
+    t : float
+        Current time.
+    y : ndarray, shape (n,)
+        Current state.
+    h : float
+        Step to try.
+    Z0 : ndarray, shape (3, n)
+        Initial guess for the solution. It determines new values of `y` at
+        ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
+    scale : ndarray, shape (n,)
+        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
+    tol : float
+        Tolerance to which to solve the system. This value is compared with
+        the error normalized by `scale`.
+    LU_real, LU_complex
+        LU decompositions of the system Jacobians.
+    solve_lu : callable
+        Callable which solves a linear system given a LU decomposition. The
+        signature is ``solve_lu(LU, b)``.
+
+    Returns
+    -------
+    converged : bool
+        Whether iterations converged.
+    n_iter : int
+        Number of completed iterations.
+    Z : ndarray, shape (3, n)
+        Found solution.
+    rate : float
+        The rate of convergence.
+    """
+    n = y.shape[0]
+    M_real = MU_REAL / h
+    M_complex = MU_COMPLEX / h
+
+    W = TI.dot(Z0)
+    Z = Z0
+
+    F = np.empty((3, n))
+    ch = h * C
+
+    dW_norm_old = None
+    dW = np.empty_like(W)
+    converged = False
+    rate = None
+    for k in range(NEWTON_MAXITER):
+        for i in range(3):
+            F[i] = fun(t + ch[i], y + Z[i])
+
+        if not np.all(np.isfinite(F)):
+            break
+
+        f_real = F.T.dot(TI_REAL) - M_real * W[0]
+        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])
+
+        dW_real = solve_lu(LU_real, f_real)
+        dW_complex = solve_lu(LU_complex, f_complex)
+
+        dW[0] = dW_real
+        dW[1] = dW_complex.real
+        dW[2] = dW_complex.imag
+
+        dW_norm = norm(dW / scale)
+        if dW_norm_old is not None:
+            rate = dW_norm / dW_norm_old
+
+        if (rate is not None and (rate >= 1 or
+                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
+            break
+
+        W += dW
+        Z = T.dot(W)
+
+        if (dW_norm == 0 or
+                rate is not None and rate / (1 - rate) * dW_norm < tol):
+            converged = True
+            break
+
+        dW_norm_old = dW_norm
+
+    return converged, k + 1, Z, rate
+
+
+def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
+    """Predict by which factor to increase/decrease the step size.
+
+    The algorithm is described in [1]_.
+
+    Parameters
+    ----------
+    h_abs, h_abs_old : float
+        Current and previous values of the step size, `h_abs_old` can be None
+        (see Notes).
+    error_norm, error_norm_old : float
+        Current and previous values of the error norm, `error_norm_old` can
+        be None (see Notes).
+
+    Returns
+    -------
+    factor : float
+        Predicted factor.
+
+    Notes
+    -----
+    If `h_abs_old` and `error_norm_old` are both not None then a two-step
+    algorithm is used, otherwise a one-step algorithm is used.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+           Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
+    """
+    if error_norm_old is None or h_abs_old is None or error_norm == 0:
+        multiplier = 1
+    else:
+        multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25
+
+    with np.errstate(divide='ignore'):
+        factor = min(1, multiplier) * error_norm ** -0.25
+
+    return factor
+
+
+class Radau(OdeSolver):
+    """Implicit Runge-Kutta method of Radau IIA family of order 5.
+
+    The implementation follows [1]_. The error is controlled with a
+    third-order accurate embedded formula.
+    A cubic polynomial which satisfies
+    the collocation conditions is used for the dense output.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system: the time derivative of the state ``y``
+        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
+        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
+        return an array of the same shape as ``y``. See `vectorized` for more
+        information.
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    jac : {None, array_like, sparse_matrix, callable}, optional
+        Jacobian matrix of the right-hand side of the system with respect to
+        y, required by this method. The Jacobian matrix has shape (n, n) and
+        its element (i, j) is equal to ``d f_i / d y_j``.
+        There are three ways to define the Jacobian:
+
+            * If array_like or sparse_matrix, the Jacobian is assumed to
+              be constant.
+            * If callable, the Jacobian is assumed to depend on both
+              t and y; it will be called as ``jac(t, y)`` as necessary.
+              For the 'Radau' and 'BDF' methods, the return value might be a
+              sparse matrix.
+            * If None (default), the Jacobian will be approximated by
+              finite differences.
+
+        It is generally recommended to provide the Jacobian rather than
+        relying on a finite-difference approximation.
+    jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines a sparsity structure of the Jacobian matrix for a
+        finite-difference approximation. Its shape must be (n, n). This argument
+        is ignored if `jac` is not `None`. If the Jacobian has only a few
+        non-zero elements in *each* row, providing the sparsity structure will
+        greatly speed up the computations [2]_. A zero entry means that a
+        corresponding element in the Jacobian is always zero. If None
+        (default), the Jacobian is assumed to be dense.
+    vectorized : bool, optional
+        Whether `fun` can be called in a vectorized fashion. Default is False.
+
+        If ``vectorized`` is False, `fun` will always be called with ``y`` of
+        shape ``(n,)``, where ``n = len(y0)``.
+
+        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
+        ``(n, k)``, where ``k`` is an integer.
In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by this method, but may result in slower + execution overall in some circumstances (e.g. small ``len(y0)``). + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II: + Stiff and Differential-Algebraic Problems", Sec. IV.8. + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + # Select initial step assuming the same order which is used to control + # the error. 
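+        # (The embedded error estimator of this Radau IIA(5) implementation
+        # is third-order accurate, hence the error order 3 passed below.)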
+ if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction, + 3, self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + self.sol = None + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc') + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + self.current_jac = True + self.LU_real = None + self.LU_complex = None + self.Z = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y, f): + self.njev += 1 + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0, self.f) + elif callable(jac): + J = jac(t0, y0) + self.njev = 1 + if issparse(J): + J = csc_matrix(J) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=float) + + else: + J = np.asarray(J, dtype=float) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return np.asarray(jac(t, y), dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." + .format((self.n, self.n), J.shape)) + else: + if issparse(jac): + J = csc_matrix(jac) + else: + J = np.asarray(jac, dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." 
+ .format((self.n, self.n), J.shape)) + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + y = self.y + f = self.f + + max_step = self.max_step + atol = self.atol + rtol = self.rtol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + h_abs_old = None + error_norm_old = None + elif self.h_abs < min_step: + h_abs = min_step + h_abs_old = None + error_norm_old = None + else: + h_abs = self.h_abs + h_abs_old = self.h_abs_old + error_norm_old = self.error_norm_old + + J = self.J + LU_real = self.LU_real + LU_complex = self.LU_complex + + current_jac = self.current_jac + jac = self.jac + + rejected = False + step_accepted = False + message = None + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + if self.sol is None: + Z0 = np.zeros((3, y.shape[0])) + else: + Z0 = self.sol(t + h * C).T - y + + scale = atol + np.abs(y) * rtol + + converged = False + while not converged: + if LU_real is None or LU_complex is None: + LU_real = self.lu(MU_REAL / h * self.I - J) + LU_complex = self.lu(MU_COMPLEX / h * self.I - J) + + converged, n_iter, Z, rate = solve_collocation_system( + self.fun, t, y, h, Z0, scale, self.newton_tol, + LU_real, LU_complex, self.solve_lu) + + if not converged: + if current_jac: + break + + J = self.jac(t, y, f) + current_jac = True + LU_real = None + LU_complex = None + + if not converged: + h_abs *= 0.5 + LU_real = None + LU_complex = None + continue + + y_new = y + Z[-1] + ZE = Z.T.dot(E) / h + error = self.solve_lu(LU_real, f + ZE) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = norm(error / scale) + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + if rejected and error_norm > 1: + error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE) + error_norm = norm(error / scale) + + if error_norm > 1: + factor = predict_factor(h_abs, h_abs_old, + error_norm, error_norm_old) + h_abs *= max(MIN_FACTOR, safety * factor) + + LU_real = None + LU_complex = None + rejected = True + else: + step_accepted = True + + recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3 + + factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old) + factor = min(MAX_FACTOR, safety * factor) + + if not recompute_jac and factor < 1.2: + factor = 1 + else: + LU_real = None + LU_complex = None + + f_new = self.fun(t_new, y_new) + if recompute_jac: + J = jac(t_new, y_new, f_new) + current_jac = True + elif jac is not None: + current_jac = False + + self.h_abs_old = self.h_abs + self.error_norm_old = error_norm + + self.h_abs = h_abs * factor + + self.y_old = y + + self.t = t_new + self.y = y_new + self.f = f_new + + self.Z = Z + + self.LU_real = LU_real + self.LU_complex = LU_complex + self.current_jac = current_jac + self.J = J + + self.t_old = t + self.sol = self._compute_dense_output() + + return step_accepted, message + + def _compute_dense_output(self): + Q = np.dot(self.Z.T, P) + return RadauDenseOutput(self.t_old, self.t, self.y_old, Q) + + def _dense_output_impl(self): + return self.sol + + +class RadauDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super().__init__(t_old, t) + self.h = t - t_old + self.Q = Q + self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) 
/ self.h
+        if t.ndim == 0:
+            p = np.tile(x, self.order + 1)
+            p = np.cumprod(p)
+        else:
+            p = np.tile(x, (self.order + 1, 1))
+            p = np.cumprod(p, axis=0)
+        # Here we don't multiply by h, not a mistake.
+        y = np.dot(self.Q, p)
+        if y.ndim == 2:
+            y += self.y_old[:, None]
+        else:
+            y += self.y_old
+
+        return y
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py
new file mode 100644
index 0000000000000000000000000000000000000000..62a5347ffe91afc754e9b818d0b34c010d0c4d12
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py
@@ -0,0 +1,601 @@
+import numpy as np
+from .base import OdeSolver, DenseOutput
+from .common import (validate_max_step, validate_tol, select_initial_step,
+                     norm, warn_extraneous, validate_first_step)
+from . import dop853_coefficients
+
+# Multiply steps computed from asymptotic behaviour of errors by this.
+SAFETY = 0.9
+
+MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
+MAX_FACTOR = 10  # Maximum allowed increase in a step size.
+
+
+def rk_step(fun, t, y, f, h, A, B, C, K):
+    """Perform a single Runge-Kutta step.
+
+    This function computes a prediction of an explicit Runge-Kutta method and
+    also estimates the error of a less accurate method.
+
+    Notation for Butcher tableau is as in [1]_.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system.
+    t : float
+        Current time.
+    y : ndarray, shape (n,)
+        Current state.
+    f : ndarray, shape (n,)
+        Current value of the derivative, i.e., ``fun(t, y)``.
+    h : float
+        Step to use.
+    A : ndarray, shape (n_stages, n_stages)
+        Coefficients for combining previous RK stages to compute the next
+        stage. For explicit methods the coefficients at and above the main
+        diagonal are zeros.
+    B : ndarray, shape (n_stages,)
+        Coefficients for combining RK stages for computing the final
+        prediction.
+    C : ndarray, shape (n_stages,)
+        Coefficients for incrementing time for consecutive RK stages.
+        The value for the first stage is always zero.
+    K : ndarray, shape (n_stages + 1, n)
+        Storage array for putting RK stages here. Stages are stored in rows.
+        The last row is filled with the derivative at the end of the step,
+        ``fun(t + h, y_new)``.
+
+    Returns
+    -------
+    y_new : ndarray, shape (n,)
+        Solution at t + h computed with a higher accuracy.
+    f_new : ndarray, shape (n,)
+        Derivative ``fun(t + h, y_new)``.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+           Equations I: Nonstiff Problems", Sec. II.4.
+ """ + K[0] = f + for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1): + dy = np.dot(K[:s].T, a[:s]) * h + K[s] = fun(t + c * h, y + dy) + + y_new = y + h * np.dot(K[:-1].T, B) + f_new = fun(t + h, y_new) + + K[-1] = f_new + + return y_new, f_new + + +class RungeKutta(OdeSolver): + """Base class for explicit Runge-Kutta methods.""" + C: np.ndarray = NotImplemented + A: np.ndarray = NotImplemented + B: np.ndarray = NotImplemented + E: np.ndarray = NotImplemented + P: np.ndarray = NotImplemented + order: int = NotImplemented + error_estimator_order: int = NotImplemented + n_stages: int = NotImplemented + + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, vectorized=False, + first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction, + self.error_estimator_order, self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype) + self.error_exponent = -1 / (self.error_estimator_order + 1) + self.h_previous = None + + def _estimate_error(self, K, h): + return np.dot(K.T, self.E) * h + + def _estimate_error_norm(self, K, h, scale): + return norm(self._estimate_error(K, h) / scale) + + def _step_impl(self): + t = self.t + y = self.y + + max_step = self.max_step + rtol = self.rtol + atol = self.atol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + + if self.h_abs > max_step: + h_abs = max_step + elif self.h_abs < min_step: + h_abs = min_step + else: + h_abs = self.h_abs + + step_accepted = False + step_rejected = False + + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A, + self.B, self.C, self.K) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = self._estimate_error_norm(self.K, h, scale) + + if error_norm < 1: + if error_norm == 0: + factor = MAX_FACTOR + else: + factor = min(MAX_FACTOR, + SAFETY * error_norm ** self.error_exponent) + + if step_rejected: + factor = min(1, factor) + + h_abs *= factor + + step_accepted = True + else: + h_abs *= max(MIN_FACTOR, + SAFETY * error_norm ** self.error_exponent) + step_rejected = True + + self.h_previous = h + self.y_old = y + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.f = f_new + + return True, None + + def _dense_output_impl(self): + Q = self.K.T.dot(self.P) + return RkDenseOutput(self.t_old, self.t, self.y_old, Q) + + +class RK23(RungeKutta): + """Explicit Runge-Kutta method of order 3(2). + + This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled + assuming accuracy of the second-order method, but steps are taken using the + third-order accurate formula (local extrapolation is done). A cubic Hermite + polynomial is used for the dense output. + + Can be applied in the complex domain. 
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system: the time derivative of the state ``y``
+        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
+        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
+        return an array of the same shape as ``y``. See `vectorized` for more
+        information.
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` may be called in a vectorized fashion. False (default)
+        is recommended for this solver.
+
+        If ``vectorized`` is False, `fun` will always be called with ``y`` of
+        shape ``(n,)``, where ``n = len(y0)``.
+
+        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
+        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
+        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
+        the returned array is the time derivative of the state corresponding
+        with a column of ``y``).
+
+        Setting ``vectorized=True`` allows for faster finite difference
+        approximation of the Jacobian by methods 'Radau' and 'BDF', but
+        will result in slower execution for this solver.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+        Is always 0 for this solver as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
+           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
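+
+    Examples
+    --------
+    A minimal sketch of driving the solver step by step; in typical use one
+    would call `scipy.integrate.solve_ivp` with ``method='RK23'`` instead of
+    instantiating this class directly.
+
+    >>> from scipy.integrate import RK23
+    >>> solver = RK23(lambda t, y: -y, 0, [1.0], 10)
+    >>> while solver.status == 'running':
+    ...     message = solver.step()
+    >>> solver.status
+    'finished'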
+    """
+    order = 3
+    error_estimator_order = 2
+    n_stages = 3
+    C = np.array([0, 1/2, 3/4])
+    A = np.array([
+        [0, 0, 0],
+        [1/2, 0, 0],
+        [0, 3/4, 0]
+    ])
+    B = np.array([2/9, 1/3, 4/9])
+    E = np.array([5/72, -1/12, -1/9, 1/8])
+    P = np.array([[1, -4 / 3, 5 / 9],
+                  [0, 1, -2/3],
+                  [0, 4/3, -8/9],
+                  [0, -1, 1]])
+
+
+class RK45(RungeKutta):
+    """Explicit Runge-Kutta method of order 5(4).
+
+    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
+    assuming the accuracy of the fourth-order method, but steps are taken
+    using the fifth-order accurate formula (local extrapolation is done).
+    A quartic interpolation polynomial is used for the dense output [2]_.
+
+    Can be applied in the complex domain.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e., each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+        Is always 0 for this solver as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+           No. 1, pp. 19-26, 1980.
+    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
+    """
+    order = 5
+    error_estimator_order = 4
+    n_stages = 6
+    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
+    A = np.array([
+        [0, 0, 0, 0, 0],
+        [1/5, 0, 0, 0, 0],
+        [3/40, 9/40, 0, 0, 0],
+        [44/45, -56/15, 32/9, 0, 0],
+        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
+        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
+    ])
+    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
+    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
+                  1/40])
+    # Corresponds to the optimum value of c_6 from [2]_.
+    P = np.array([
+        [1, -8048581381/2820520608, 8663915743/2820520608,
+         -12715105075/11282082432],
+        [0, 0, 0, 0],
+        [0, 131558114200/32700410799, -68118460800/10900136933,
+         87487479700/32700410799],
+        [0, -1754552775/470086768, 14199869525/1410260304,
+         -10690763975/1880347072],
+        [0, 127303824393/49829197408, -318862633887/49829197408,
+         701980252875 / 199316789632],
+        [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
+        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
+
+
+class DOP853(RungeKutta):
+    """Explicit Runge-Kutta method of order 8.
+
+    This is a Python implementation of "DOP853" algorithm originally written
+    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
+    the algorithmic core and coefficients are the same.
+
+    Can be applied in the complex domain.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e. each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e. the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`.
+        Default values are 1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian. Is always 0 for this solver
+        as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+           Equations I: Nonstiff Problems", Sec. II.
+    .. [2] `Page with original Fortran code of DOP853
+            <http://www.unige.ch/~hairer/software.html>`_.
+    """
+    n_stages = dop853_coefficients.N_STAGES
+    order = 8
+    error_estimator_order = 7
+    A = dop853_coefficients.A[:n_stages, :n_stages]
+    B = dop853_coefficients.B
+    C = dop853_coefficients.C[:n_stages]
+    E3 = dop853_coefficients.E3
+    E5 = dop853_coefficients.E5
+    D = dop853_coefficients.D
+
+    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
+    C_EXTRA = dop853_coefficients.C[n_stages + 1:]
+
+    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+                 rtol=1e-3, atol=1e-6, vectorized=False,
+                 first_step=None, **extraneous):
+        super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol,
+                         vectorized, first_step, **extraneous)
+        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
+                                    self.n), dtype=self.y.dtype)
+        self.K = self.K_extended[:self.n_stages + 1]
+
+    def _estimate_error(self, K, h):  # Left for testing purposes.
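+        # Stabilized error estimate of the Fortran DOP853 code: the
+        # fifth-order estimate (E5) is damped wherever the third-order
+        # estimate (E3) is comparatively large, which guards against
+        # overestimating the error on stiff problems.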
+ err5 = np.dot(K.T, self.E5) + err3 = np.dot(K.T, self.E3) + denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3)) + correction_factor = np.ones_like(err5) + mask = denom > 0 + correction_factor[mask] = np.abs(err5[mask]) / denom[mask] + return h * err5 * correction_factor + + def _estimate_error_norm(self, K, h, scale): + err5 = np.dot(K.T, self.E5) / scale + err3 = np.dot(K.T, self.E3) / scale + err5_norm_2 = np.linalg.norm(err5)**2 + err3_norm_2 = np.linalg.norm(err3)**2 + if err5_norm_2 == 0 and err3_norm_2 == 0: + return 0.0 + denom = err5_norm_2 + 0.01 * err3_norm_2 + return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale)) + + def _dense_output_impl(self): + K = self.K_extended + h = self.h_previous + for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA), + start=self.n_stages + 1): + dy = np.dot(K[:s].T, a[:s]) * h + K[s] = self.fun(self.t_old + c * h, self.y_old + dy) + + F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n), + dtype=self.y_old.dtype) + + f_old = K[0] + delta_y = self.y - self.y_old + + F[0] = delta_y + F[1] = h * f_old - delta_y + F[2] = 2 * delta_y - h * (self.f + f_old) + F[3:] = h * np.dot(self.D, K) + + return Dop853DenseOutput(self.t_old, self.t, self.y_old, F) + + +class RkDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super().__init__(t_old, t) + self.h = t - t_old + self.Q = Q + self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + if t.ndim == 0: + p = np.tile(x, self.order + 1) + p = np.cumprod(p) + else: + p = np.tile(x, (self.order + 1, 1)) + p = np.cumprod(p, axis=0) + y = self.h * np.dot(self.Q, p) + if y.ndim == 2: + y += self.y_old[:, None] + else: + y += self.y_old + + return y + + +class Dop853DenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, F): + super().__init__(t_old, t) + self.h = t - t_old + self.F = F + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + + if t.ndim == 0: + y = np.zeros_like(self.y_old) + else: + x = x[:, None] + y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype) + + for i, f in enumerate(reversed(self.F)): + y += f + if i % 2 == 0: + y *= x + else: + y *= 1 - x + y += self.y_old + + return y.T diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d91a0a76cf12156e4b190d31d88e98d1ffdcd6e Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2abe16abb9b42be678f9c2fcb1b1b566e4591b83 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecee56ec3d5b3c11573574d907a47eb3f0dc6506 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py new file mode 100644 index 0000000000000000000000000000000000000000..9c050b3e5bd49e8f222958edf7802f11d2c9bdda --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py @@ -0,0 +1,1244 @@ +from itertools import product +from numpy.testing import (assert_, assert_allclose, assert_array_less, + assert_equal, assert_no_warnings, suppress_warnings) +import pytest +from pytest import raises as assert_raises +import numpy as np +from scipy.optimize._numdiff import group_columns +from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA +from scipy.integrate import OdeSolution +from scipy.integrate._ivp.common import num_jac, select_initial_step +from scipy.integrate._ivp.base import ConstantDenseOutput +from scipy.sparse import coo_matrix, csc_matrix + + +def fun_zero(t, y): + return np.zeros_like(y) + + +def fun_linear(t, y): + return np.array([-y[0] - 5 * y[1], y[0] + y[1]]) + + +def jac_linear(): + return np.array([[-1, -5], [1, 1]]) + + +def sol_linear(t): + return np.vstack((-5 * np.sin(2 * t), + 2 * np.cos(2 * t) + np.sin(2 * t))) + + +def fun_rational(t, y): + return np.array([y[1] / t, + y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))]) + + +def fun_rational_vectorized(t, y): + return np.vstack((y[1] / t, + y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1)))) + + +def jac_rational(t, y): + return np.array([ + [0, 1 / t], + [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), + (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] + ]) + + +def jac_rational_sparse(t, y): + return csc_matrix([ + [0, 1 / t], + [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), + (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] + ]) + + +def sol_rational(t): + return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2)) + + +def fun_medazko(t, y): + n = y.shape[0] // 2 + k = 100 + c = 4 + + phi = 2 if t <= 5 else 0 + y = np.hstack((phi, 0, y, y[-2])) + + d = 1 / n + j = np.arange(n) + 1 + alpha = 2 * (j * d - 1) ** 3 / c ** 2 + beta = (j * d - 1) ** 4 / c ** 2 + + j_2_p1 = 2 * j + 2 + j_2_m3 = 2 * j - 2 + j_2_m1 = 2 * j + j_2 = 2 * j + 1 + + f = np.empty(2 * n) + f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) + + beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 - + k * y[j_2_m1] * y[j_2]) + f[1::2] = -k * y[j_2] * y[j_2_m1] + + return f + + +def medazko_sparsity(n): + cols = [] + rows = [] + + i = np.arange(n) * 2 + + cols.append(i[1:]) + rows.append(i[1:] - 2) + + cols.append(i) + 
rows.append(i)
+
+    cols.append(i)
+    rows.append(i + 1)
+
+    cols.append(i[:-1])
+    rows.append(i[:-1] + 2)
+
+    i = np.arange(n) * 2 + 1
+
+    cols.append(i)
+    rows.append(i)
+
+    cols.append(i)
+    rows.append(i - 1)
+
+    cols = np.hstack(cols)
+    rows = np.hstack(rows)
+
+    return coo_matrix((np.ones_like(cols), (cols, rows)))
+
+
+def fun_complex(t, y):
+    return -y
+
+
+def jac_complex(t, y):
+    return -np.eye(y.shape[0])
+
+
+def jac_complex_sparse(t, y):
+    return csc_matrix(jac_complex(t, y))
+
+
+def sol_complex(t):
+    y = (0.5 + 1j) * np.exp(-t)
+    return y.reshape((1, -1))
+
+
+def fun_event_dense_output_LSODA(t, y):
+    return y * (t - 2)
+
+
+def jac_event_dense_output_LSODA(t, y):
+    return t - 2
+
+
+def sol_event_dense_output_LSODA(t):
+    return np.exp(t ** 2 / 2 - 2 * t + np.log(0.05) - 6)
+
+
+def compute_error(y, y_true, rtol, atol):
+    e = (y - y_true) / (atol + rtol * np.abs(y_true))
+    return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0])
+
+
+def test_integration():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+
+    for vectorized, method, t_span, jac in product(
+            [False, True],
+            ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
+            [[5, 9], [5, 1]],
+            [None, jac_rational, jac_rational_sparse]):
+
+        if vectorized:
+            fun = fun_rational_vectorized
+        else:
+            fun = fun_rational
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "The following arguments have no effect for a chosen "
+                       "solver: `jac`")
+            res = solve_ivp(fun, t_span, y0, rtol=rtol,
+                            atol=atol, method=method, dense_output=True,
+                            jac=jac, vectorized=vectorized)
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.y_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        if method == 'DOP853':
+            # DOP853 uses more function evaluations because it doesn't
+            # have enough time to develop a large enough step size.
+ assert_(res.nfev < 50) + else: + assert_(res.nfev < 40) + + if method in ['RK23', 'RK45', 'DOP853', 'LSODA']: + assert_equal(res.njev, 0) + assert_equal(res.nlu, 0) + else: + assert_(0 < res.njev < 3) + assert_(0 < res.nlu < 10) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = (t_span[0] + t_span[-1]) / 2 + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + +def test_integration_complex(): + rtol = 1e-3 + atol = 1e-6 + y0 = [0.5 + 1j] + t_span = [0, 1] + tc = np.linspace(t_span[0], t_span[1]) + for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'], + [None, jac_complex, jac_complex_sparse]): + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The following arguments have no effect for a chosen " + "solver: `jac`") + res = solve_ivp(fun_complex, t_span, y0, method=method, + dense_output=True, rtol=rtol, atol=atol, jac=jac) + + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + if method == 'DOP853': + assert res.nfev < 35 + else: + assert res.nfev < 25 + + if method == 'BDF': + assert_equal(res.njev, 1) + assert res.nlu < 6 + else: + assert res.njev == 0 + assert res.nlu == 0 + + y_true = sol_complex(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert np.all(e < 5) + + yc_true = sol_complex(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, rtol, atol) + + assert np.all(e < 5) + + +@pytest.mark.fail_slow(2) +def test_integration_sparse_difference(): + n = 200 + t_span = [0, 20] + y0 = np.zeros(2 * n) + y0[1::2] = 1 + sparsity = medazko_sparsity(n) + + for method in ['BDF', 'Radau']: + res = solve_ivp(fun_medazko, t_span, y0, method=method, + jac_sparsity=sparsity) + + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2) + assert_allclose(res.y[79, -1], 0, atol=1e-3) + assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2) + assert_allclose(res.y[149, -1], 0, atol=1e-3) + assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2) + assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3) + assert_allclose(res.y[238, -1], 0, atol=1e-3) + assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2) + + +def test_integration_const_jac(): + rtol = 1e-3 + atol = 1e-6 + y0 = [0, 2] + t_span = [0, 2] + J = jac_linear() + J_sparse = csc_matrix(J) + + for method, jac in product(['Radau', 'BDF'], [J, J_sparse]): + res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol, + method=method, dense_output=True, jac=jac) + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_(res.nfev < 100) + assert_equal(res.njev, 0) + assert_(0 < res.nlu < 15) + + y_true = sol_linear(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 10)) + + tc = np.linspace(*t_span) + yc_true = sol_linear(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 15)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14) + + 
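+# A minimal sketch added for illustration (not an upstream test): the loop
+# above exercises constant Jacobians in both dense and sparse layouts; the
+# same thing in isolation looks like this.
+#
+#     res = solve_ivp(fun_linear, [0, 2], [0, 2], method='BDF',
+#                     jac=jac_linear())
+#     assert res.success
+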
+@pytest.mark.slow +@pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA']) +def test_integration_stiff(method): + rtol = 1e-6 + atol = 1e-6 + y0 = [1e4, 0, 0] + tspan = [0, 1e8] + + def fun_robertson(t, state): + x, y, z = state + return [ + -0.04 * x + 1e4 * y * z, + 0.04 * x - 1e4 * y * z - 3e7 * y * y, + 3e7 * y * y, + ] + + res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol, + atol=atol, method=method) + + # If the stiff mode is not activated correctly, these numbers will be much bigger + assert res.nfev < 5000 + assert res.njev < 200 + + +def test_events(): + def event_rational_1(t, y): + return y[0] - y[1] ** 0.7 + + def event_rational_2(t, y): + return y[1] ** 0.6 - y[0] + + def event_rational_3(t, y): + return t - 7.4 + + event_rational_3.terminal = True + + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[1][0] < 7.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = 1 + event_rational_2.direction = 1 + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (0,)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + + event_rational_1.direction = -1 + event_rational_2.direction = -1 + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + assert_equal(res.y_events[0].shape, (0,)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = 0 + event_rational_2.direction = 0 + + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2, + event_rational_3), dense_output=True) + assert_equal(res.status, 1) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_equal(res.t_events[2].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[2][0] < 7.5) + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (0,)) + assert_equal(res.y_events[2].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + assert np.isclose( + event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0) + + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=event_rational_1, dense_output=True) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], 
res.y_events[0][0]), 0) + + # Also test that termination by event doesn't break interpolants. + tc = np.linspace(res.t[0], res.t[-1]) + yc_true = sol_rational(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, 1e-3, 1e-6) + assert_(np.all(e < 5)) + + # Test that the y_event matches solution + assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0], + rtol=1e-3, atol=1e-6) + + # Test in backward direction. + event_rational_1.direction = 0 + event_rational_2.direction = 0 + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[1][0] < 7.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = -1 + event_rational_2.direction = -1 + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_(5.3 < res.t_events[0][0] < 5.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (0,)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + + event_rational_1.direction = 1 + event_rational_2.direction = 1 + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + + assert_equal(res.y_events[0].shape, (0,)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = 0 + event_rational_2.direction = 0 + + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2, + event_rational_3), dense_output=True) + assert_equal(res.status, 1) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_equal(res.t_events[2].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + assert_(7.3 < res.t_events[2][0] < 7.5) + + assert_equal(res.y_events[0].shape, (0,)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert_equal(res.y_events[2].shape, (1, 2)) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + assert np.isclose( + event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0) + + # Also test that termination by event doesn't break interpolants. 
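+        # (This run integrates backward from t=8, so res.t decreases;
+        # res.t[-1] is the smallest time reached and the linspace below
+        # sweeps the integrated interval in increasing t.)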
+ tc = np.linspace(res.t[-1], res.t[0]) + yc_true = sol_rational(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, 1e-3, 1e-6) + assert_(np.all(e < 5)) + + assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0], + rtol=1e-3, atol=1e-6) + assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0], + rtol=1e-3, atol=1e-6) + + +def _get_harmonic_oscillator(): + def f(t, y): + return [y[1], -y[0]] + + def event(t, y): + return y[0] + + return f, event + + +@pytest.mark.parametrize('n_events', [3, 4]) +def test_event_terminal_integer(n_events): + f, event = _get_harmonic_oscillator() + event.terminal = n_events + res = solve_ivp(f, (0, 100), [1, 0], events=event) + assert len(res.t_events[0]) == n_events + assert len(res.y_events[0]) == n_events + assert_allclose(res.y_events[0][:, 0], 0, atol=1e-14) + + +def test_event_terminal_iv(): + f, event = _get_harmonic_oscillator() + args = (f, (0, 100), [1, 0]) + + event.terminal = None + res = solve_ivp(*args, events=event) + event.terminal = 0 + ref = solve_ivp(*args, events=event) + assert_allclose(res.t_events, ref.t_events) + + message = "The `terminal` attribute..." + event.terminal = -1 + with pytest.raises(ValueError, match=message): + solve_ivp(*args, events=event) + event.terminal = 3.5 + with pytest.raises(ValueError, match=message): + solve_ivp(*args, events=event) + + +def test_max_step(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + for t_span in ([5, 9], [5, 1]): + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, + max_step=0.5, atol=atol, method=method, + dense_output=True) + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15)) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], max_step=-1) + + if method is not LSODA: + solver = method(fun_rational, t_span[0], y0, t_span[1], + rtol=rtol, atol=atol, max_step=1e-20) + message = solver.step() + message = solver.step() # First step succeeds but second step fails. 
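+            # With max_step=1e-20 the permitted step quickly falls below the
+            # floating-point spacing around t, so the solver gives up: status
+            # becomes 'failed', step() returns the failure message, and any
+            # further step() call raises RuntimeError.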
+ assert_equal(solver.status, 'failed') + assert_("step size is less" in message) + assert_raises(RuntimeError, solver.step) + + +def test_first_step(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + first_step = 0.1 + for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + for t_span in ([5, 9], [5, 1]): + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, + max_step=0.5, atol=atol, method=method, + dense_output=True, first_step=first_step) + + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_allclose(first_step, np.abs(res.t[1] - 5)) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], first_step=-1) + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], first_step=5) + + +def test_t_eval(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + for t_span in ([5, 9], [5, 1]): + t_eval = np.linspace(t_span[0], t_span[1], 10) + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [5, 5.01, 7, 8, 8.01, 9] + res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1] + res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + t_eval = [5.01, 7, 8, 8.01] + res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [4.99, 3, 1.5, 1.1, 1.01] + res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + t_eval = [4, 6] + assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0, + rtol=rtol, atol=atol, t_eval=t_eval) + + +def test_t_eval_dense_output(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + t_span = [5, 9] + t_eval = np.linspace(t_span[0], t_span[1], 10) + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval) + res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval, dense_output=True) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_equal(res.t, res_d.t) + assert_equal(res.y, res_d.y) + assert_(res_d.t_events is None) + assert_(res_d.success) + assert_equal(res_d.status, 0) + + # if t and y are equal only test 
values for one case + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + +def test_t_eval_early_event(): + def early_event(t, y): + return t - 7 + + early_event.terminal = True + + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + t_span = [5, 9] + t_eval = np.linspace(7.5, 9, 16) + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The following arguments have no effect for a chosen " + "solver: `jac`") + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + method=method, t_eval=t_eval, events=early_event, + jac=jac_rational) + assert res.success + assert res.message == 'A termination event occurred.' + assert res.status == 1 + assert not res.t and not res.y + assert len(res.t_events) == 1 + assert res.t_events[0].size == 1 + assert res.t_events[0][0] == 7 + + +def test_event_dense_output_LSODA(): + def event_lsoda(t, y): + return y[0] - 2.02e-5 + + rtol = 1e-3 + atol = 1e-6 + y0 = [0.05] + t_span = [-2, 2] + first_step = 1e-3 + res = solve_ivp( + fun_event_dense_output_LSODA, + t_span, + y0, + method="LSODA", + dense_output=True, + events=event_lsoda, + first_step=first_step, + max_step=1, + rtol=rtol, + atol=atol, + jac=jac_event_dense_output_LSODA, + ) + + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_allclose(first_step, np.abs(res.t[1] - t_span[0])) + assert res.success + assert_equal(res.status, 0) + + y_true = sol_event_dense_output_LSODA(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_array_less(e, 5) + + tc = np.linspace(*t_span) + yc_true = sol_event_dense_output_LSODA(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, rtol, atol) + assert_array_less(e, 5) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + +def test_no_integration(): + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3], + method=method, dense_output=True) + assert_equal(sol.sol(4), [2, 3]) + assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]]) + + +def test_no_integration_class(): + for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0) + solver.step() + assert_equal(solver.status, 'finished') + sol = solver.dense_output() + assert_equal(sol(0.0), [10.0, 0.0]) + assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]]) + + solver = method(lambda t, y: -y, 0.0, [], np.inf) + solver.step() + assert_equal(solver.status, 'finished') + sol = solver.dense_output() + assert_equal(sol(100.0), []) + assert_equal(sol([0, 1, 2]), np.empty((0, 3))) + + +def test_empty(): + def fun(t, y): + return np.zeros((0,)) + + y0 = np.zeros((0,)) + + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0, + method=method, dense_output=True) + assert_equal(sol.sol(10), np.zeros((0,))) + assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) + + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0, + method=method, dense_output=True) + assert_equal(sol.sol(10), np.zeros((0,))) + assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) + + +def test_ConstantDenseOutput(): + sol = ConstantDenseOutput(0, 1, np.array([1, 2])) + assert_allclose(sol(1.5), [1, 2]) + assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]]) + + sol = ConstantDenseOutput(0, 1, np.array([])) + 
assert_allclose(sol(1.5), np.empty(0)) + assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3))) + + +def test_classes(): + y0 = [1 / 3, 2 / 9] + for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + solver = cls(fun_rational, 5, y0, np.inf) + assert_equal(solver.n, 2) + assert_equal(solver.status, 'running') + assert_equal(solver.t_bound, np.inf) + assert_equal(solver.direction, 1) + assert_equal(solver.t, 5) + assert_equal(solver.y, y0) + assert_(solver.step_size is None) + if cls is not LSODA: + assert_(solver.nfev > 0) + assert_(solver.njev >= 0) + assert_equal(solver.nlu, 0) + else: + assert_equal(solver.nfev, 0) + assert_equal(solver.njev, 0) + assert_equal(solver.nlu, 0) + + assert_raises(RuntimeError, solver.dense_output) + + message = solver.step() + assert_equal(solver.status, 'running') + assert_equal(message, None) + assert_equal(solver.n, 2) + assert_equal(solver.t_bound, np.inf) + assert_equal(solver.direction, 1) + assert_(solver.t > 5) + assert_(not np.all(np.equal(solver.y, y0))) + assert_(solver.step_size > 0) + assert_(solver.nfev > 0) + assert_(solver.njev >= 0) + assert_(solver.nlu >= 0) + sol = solver.dense_output() + assert_allclose(sol(5), y0, rtol=1e-15, atol=0) + + +def test_OdeSolution(): + ts = np.array([0, 2, 5], dtype=float) + s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) + s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) + + sol = OdeSolution(ts, [s1, s2]) + + assert_equal(sol(-1), [-1]) + assert_equal(sol(1), [-1]) + assert_equal(sol(2), [-1]) + assert_equal(sol(3), [1]) + assert_equal(sol(5), [1]) + assert_equal(sol(6), [1]) + + assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]), + np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]])) + + ts = np.array([10, 4, -3]) + s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) + s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) + + sol = OdeSolution(ts, [s1, s2]) + assert_equal(sol(11), [-1]) + assert_equal(sol(10), [-1]) + assert_equal(sol(5), [-1]) + assert_equal(sol(4), [-1]) + assert_equal(sol(0), [1]) + assert_equal(sol(-3), [1]) + assert_equal(sol(-4), [1]) + + assert_equal(sol([12, -5, 10, -3, 6, 1, 4]), + np.array([[-1, 1, -1, 1, -1, 1, -1]])) + + ts = np.array([1, 1]) + s = ConstantDenseOutput(1, 1, np.array([10])) + sol = OdeSolution(ts, [s]) + assert_equal(sol(0), [10]) + assert_equal(sol(1), [10]) + assert_equal(sol(2), [10]) + + assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]])) + + +def test_num_jac(): + def fun(t, y): + return np.vstack([ + -0.04 * y[0] + 1e4 * y[1] * y[2], + 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2, + 3e7 * y[1] ** 2 + ]) + + def jac(t, y): + return np.array([ + [-0.04, 1e4 * y[2], 1e4 * y[1]], + [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]], + [0, 6e7 * y[1], 0] + ]) + + t = 1 + y = np.array([1, 0, 0]) + J_true = jac(t, y) + threshold = 1e-5 + f = fun(t, y).ravel() + + J_num, factor = num_jac(fun, t, y, f, threshold, None) + assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) + + J_num, factor = num_jac(fun, t, y, f, threshold, factor) + assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) + + +def test_num_jac_sparse(): + def fun(t, y): + e = y[1:]**3 - y[:-1]**2 + z = np.zeros(y.shape[1]) + return np.vstack((z, 3 * e)) + np.vstack((2 * e, z)) + + def structure(n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + np.random.seed(0) + n = 20 + y = np.random.randn(n) + A = structure(n) + groups = group_columns(A) + + f = fun(0, y[:, 
None]).ravel() + + # Compare dense and sparse results, assuming that dense implementation + # is correct (as it is straightforward). + J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None, + sparsity=(A, groups)) + J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None) + assert_allclose(J_num_dense, J_num_sparse.toarray(), + rtol=1e-12, atol=1e-14) + assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) + + # Take small factors to trigger their recomputing inside. + factor = np.random.uniform(0, 1e-12, size=n) + J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor, + sparsity=(A, groups)) + J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor) + + assert_allclose(J_num_dense, J_num_sparse.toarray(), + rtol=1e-12, atol=1e-14) + assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) + + +def test_args(): + + # sys3 is actually two decoupled systems. (x, y) form a + # linear oscillator, while z is a nonlinear first order + # system with equilibria at z=0 and z=1. If k > 0, z=1 + # is stable and z=0 is unstable. + + def sys3(t, w, omega, k, zfinal): + x, y, z = w + return [-omega*y, omega*x, k*z*(1 - z)] + + def sys3_jac(t, w, omega, k, zfinal): + x, y, z = w + J = np.array([[0, -omega, 0], + [omega, 0, 0], + [0, 0, k*(1 - 2*z)]]) + return J + + def sys3_x0decreasing(t, w, omega, k, zfinal): + x, y, z = w + return x + + def sys3_y0increasing(t, w, omega, k, zfinal): + x, y, z = w + return y + + def sys3_zfinal(t, w, omega, k, zfinal): + x, y, z = w + return z - zfinal + + # Set the event flags for the event functions. + sys3_x0decreasing.direction = -1 + sys3_y0increasing.direction = 1 + sys3_zfinal.terminal = True + + omega = 2 + k = 4 + + tfinal = 5 + zfinal = 0.99 + # Find z0 such that when z(0) = z0, z(tfinal) = zfinal. + # The condition z(tfinal) = zfinal is the terminal event. + z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal)) + + w0 = [0, -1, z0] + + # Provide the jac argument and use the Radau method to ensure that the use + # of the Jacobian function is exercised. + # If event handling is working, the solution will stop at tfinal, not tend. + tend = 2*tfinal + sol = solve_ivp(sys3, [0, tend], w0, + events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal], + dense_output=True, args=(omega, k, zfinal), + method='Radau', jac=sys3_jac, + rtol=1e-10, atol=1e-13) + + # Check that we got the expected events at the expected times. + x0events_t = sol.t_events[0] + y0events_t = sol.t_events[1] + zfinalevents_t = sol.t_events[2] + assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi]) + assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi]) + assert_allclose(zfinalevents_t, [tfinal]) + + # Check that the solution agrees with the known exact solution. + t = np.linspace(0, zfinalevents_t[0], 250) + w = sol.sol(t) + assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12) + assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12) + assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1), + rtol=1e-9, atol=1e-12) + + # Check that the state variables have the expected values at the events. 
+ x0events = sol.sol(x0events_t) + y0events = sol.sol(y0events_t) + zfinalevents = sol.sol(zfinalevents_t) + assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14) + assert_allclose(x0events[1], np.ones_like(x0events[1])) + assert_allclose(y0events[0], np.ones_like(y0events[0])) + assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14) + assert_allclose(zfinalevents[2], [zfinal]) + + +def test_array_rtol(): + # solve_ivp had a bug with array_like `rtol`; see gh-15482 + # check that it's fixed + def f(t, y): + return y[0], y[1] + + # no warning (or error) when `rtol` is array_like + sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-1]) + err1 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1))) + + # warning when an element of `rtol` is too small + with pytest.warns(UserWarning, match="At least one element..."): + sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-16]) + err2 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1))) + + # tighter rtol improves the error + assert err2 < err1 + +@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']) +def test_integration_zero_rhs(method): + result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method) + assert_(result.success) + assert_equal(result.status, 0) + assert_allclose(result.y, 1.0, rtol=1e-15) + + +def test_args_single_value(): + def fun_with_arg(t, y, a): + return a*y + + message = "Supplied 'args' cannot be unpacked." + with pytest.raises(TypeError, match=message): + solve_ivp(fun_with_arg, (0, 0.1), [1], args=-1) + + sol = solve_ivp(fun_with_arg, (0, 0.1), [1], args=(-1,)) + assert_allclose(sol.y[0, -1], np.exp(-0.1)) + + +@pytest.mark.parametrize("f0_fill", [np.nan, np.inf]) +def test_initial_state_finiteness(f0_fill): + # regression test for gh-17846 + msg = "All components of the initial state `y0` must be finite." + with pytest.raises(ValueError, match=msg): + solve_ivp(fun_zero, [0, 10], np.full(3, f0_fill)) + + +@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF']) +def test_zero_interval(method): + # Case where upper and lower limits of integration are the same + # Result of integration should match initial state. 
+    # f[y(t)] = 2y(t)
+    def f(t, y):
+        return 2 * y
+    res = solve_ivp(f, (0.0, 0.0), np.array([1.0]), method=method)
+    assert res.success
+    assert_allclose(res.y[0, -1], 1.0)
+
+
+@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
+def test_tbound_respected_small_interval(method):
+    """Regression test for gh-17341"""
+    SMALL = 1e-4
+
+    # f[y(t)] = 2y(t) on t in [0,SMALL]
+    # undefined otherwise
+    def f(t, y):
+        if t > SMALL:
+            raise ValueError("Function was evaluated outside interval")
+        return 2 * y
+    res = solve_ivp(f, (0.0, SMALL), np.array([1]), method=method)
+    assert res.success
+
+
+@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
+def test_tbound_respected_larger_interval(method):
+    """Regression test for gh-8848"""
+    def V(r):
+        return -11/r + 10 * r / (0.05 + r**2)
+
+    def func(t, p):
+        if t < -17 or t > 2:
+            raise ValueError("Function was evaluated outside interval")
+        P = p[0]
+        Q = p[1]
+        r = np.exp(t)
+        dPdr = r * Q
+        dQdr = -2.0 * r * ((-0.2 - V(r)) * P + 1 / r * Q)
+        return np.array([dPdr, dQdr])
+
+    result = solve_ivp(func,
+                       (-17, 2),
+                       y0=np.array([1, -11]),
+                       max_step=0.03,
+                       vectorized=False,
+                       t_eval=None,
+                       atol=1e-8,
+                       rtol=1e-5)
+    assert result.success
+
+
+@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
+def test_tbound_respected_oscillator(method):
+    "Regression test for gh-9198"
+    def reactions_func(t, y):
+        if (t > 205):
+            raise ValueError("Called outside interval")
+        yprime = np.array([1.73307544e-02,
+                           6.49376470e-06,
+                           0.00000000e+00,
+                           0.00000000e+00])
+        return yprime
+
+    def run_sim2(t_end, n_timepoints=10, shortest_delay_line=10000000):
+        init_state = np.array([134.08298555, 138.82348612, 100., 0.])
+        t0 = 100.0
+        t1 = 200.0
+        return solve_ivp(reactions_func,
+                         (t0, t1),
+                         init_state.copy(),
+                         dense_output=True,
+                         max_step=t1 - t0)
+    result = run_sim2(1000, 100, 100)
+    assert result.success
+
+
+def test_initial_maxstep():
+    """Verify that select_initial_step respects max_step"""
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = np.array([1/3, 2/9])
+    for (t0, t_bound) in ((5, 9), (5, 1)):
+        for method_order in [RK23.error_estimator_order,
+                             RK45.error_estimator_order,
+                             DOP853.error_estimator_order,
+                             3,  # Radau
+                             1   # BDF
+                             ]:
+            step_no_max = select_initial_step(fun_rational, t0, y0, t_bound,
+                                              np.inf,
+                                              fun_rational(t0, y0),
+                                              np.sign(t_bound - t0),
+                                              method_order,
+                                              rtol, atol)
+            max_step = step_no_max/2
+            step_with_max = select_initial_step(fun_rational, t0, y0, t_bound,
+                                                max_step,
+                                                fun_rational(t0, y0),
+                                                np.sign(t_bound - t0),
+                                                method_order,
+                                                rtol, atol)
+            assert_equal(max_step, step_with_max)
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py
new file mode 100644
index 0000000000000000000000000000000000000000..33cb27d0323d037c0937ab94b4de8f63b46be3d7
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py
@@ -0,0 +1,37 @@
+import pytest
+from numpy.testing import assert_allclose, assert_
+import numpy as np
+from scipy.integrate import RK23, RK45, DOP853
+from scipy.integrate._ivp import dop853_coefficients
+
+
+@pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
+def test_coefficient_properties(solver):
+    assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
+    assert_allclose(np.sum(solver.A, axis=1),
+                    solver.C, rtol=1e-14)
+
+
+def test_coefficient_properties_dop853():
+    assert_allclose(np.sum(dop853_coefficients.B), 1, rtol=1e-15)
+    assert_allclose(np.sum(dop853_coefficients.A, axis=1),
+                    dop853_coefficients.C,
+                    rtol=1e-14)
+
+
+@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
+def test_error_estimation(solver_class):
+    step = 0.2
+    solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step)
+    solver.step()
+    error_estimate = solver._estimate_error(solver.K, step)
+    error = solver.y - np.exp([step])
+    assert_(np.abs(error) < np.abs(error_estimate))
+
+
+@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
+def test_error_estimation_complex(solver_class):
+    h = 0.2
+    solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h)
+    solver.step()
+    err_norm = solver._estimate_error_norm(solver.K, h, scale=[1])
+    assert np.isrealobj(err_norm)
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ode.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ode.py
new file mode 100644
index 0000000000000000000000000000000000000000..794a4dc6372164db4c4f1540649b20fd945cd81c
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_ode.py
@@ -0,0 +1,1376 @@
+# Authors: Pearu Peterson, Pauli Virtanen, John Travers
+"""
+First-order ODE integrators.
+
+User-friendly interface to various numerical integrators for solving a
+system of first order ODEs with prescribed initial conditions::
+
+    d y(t)[i]
+    ---------  = f(t,y(t))[i],
+      d t
+
+    y(t=0)[i] = y0[i],
+
+where::
+
+    i = 0, ..., len(y0) - 1
+
+class ode
+---------
+
+A generic interface class to numeric integrators. It has the following
+methods::
+
+    integrator = ode(f, jac=None)
+    integrator = integrator.set_integrator(name, **params)
+    integrator = integrator.set_initial_value(y0, t0=0.0)
+    integrator = integrator.set_f_params(*args)
+    integrator = integrator.set_jac_params(*args)
+    y1 = integrator.integrate(t1, step=False, relax=False)
+    flag = integrator.successful()
+
+class complex_ode
+-----------------
+
+This class has the same generic interface as ode, except it can handle complex
+f, y and Jacobians by transparently translating them into the equivalent
+real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
+an alternative to ode with the zvode solver, sometimes performing better.
+"""
+# XXX: Integrators must have:
+# ===========================
+# cvode - C version of vode and vodpk with many improvements.
+#   Get it from http://www.netlib.org/ode/cvode.tar.gz.
+#   To wrap cvode to Python, one must write the extension module by
+#   hand. Its interface is so much 'advanced C' that using f2py
+#   would be too complicated (or impossible).
+#
+# How to define a new integrator:
+# ===============================
+#
+# class myodeint(IntegratorBase):
+#
+#     runner = <odeint function> or None
+#
+#     def __init__(self,...):                           # required
+#         <initialize>
+#
+#     def reset(self,n,has_jac):                        # optional
+#         # n - the size of the problem (number of equations)
+#         # has_jac - whether user has supplied its own routine for Jacobian
+#         <allocate memory,initialize further>
+#
+#     def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
+#         # this method is called to integrate from t=t0 to t=t1
+#         # with initial condition y0. f and jac are user-supplied functions
+#         # that define the problem. f_params,jac_params are additional
+#         # arguments
+#         # to these functions.
+#         <calculate y1>
+#         if <calculation was unsuccessful>:
+#             self.success = 0
+#         return t1,y1
+#
+#     # In addition, one can define step() and run_relax() methods (they
+#     # take the same arguments as run()) if the integrator can support
+#     # these features (see IntegratorBase doc strings).
+#
+# if myodeint.runner:
+#     IntegratorBase.integrator_classes.append(myodeint)
+
+__all__ = ['ode', 'complex_ode']
+
+import re
+import warnings
+
+from numpy import asarray, array, zeros, isscalar, real, imag, vstack
+
+from . import _vode
+from . import _dop
+from . import _lsoda
+
+
+_dop_int_dtype = _dop.types.intvar.dtype
+_vode_int_dtype = _vode.types.intvar.dtype
+_lsoda_int_dtype = _lsoda.types.intvar.dtype
+
+
+# ------------------------------------------------------------------------------
+# User interface
+# ------------------------------------------------------------------------------
+
+
+class ode:
+    """
+    A generic interface class to numeric integrators.
+
+    Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
+
+    *Note*: The first two arguments of ``f(t, y, ...)`` are in the
+    opposite order of the arguments in the system definition function used
+    by `scipy.integrate.odeint`.
+
+    Parameters
+    ----------
+    f : callable ``f(t, y, *f_args)``
+        Right-hand side of the differential equation. t is a scalar,
+        ``y.shape == (n,)``.
+        ``f_args`` is set by calling ``set_f_params(*args)``.
+        `f` should return a scalar, array or list (not a tuple).
+    jac : callable ``jac(t, y, *jac_args)``, optional
+        Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
+        ``jac_args`` is set by calling ``set_jac_params(*args)``.
+
+    Attributes
+    ----------
+    t : float
+        Current time.
+    y : ndarray
+        Current variable values.
+
+    See also
+    --------
+    odeint : an integrator with a simpler interface based on lsoda from ODEPACK
+    quad : for finding the area under a curve
+
+    Notes
+    -----
+    Available integrators are listed below. They can be selected using
+    the `set_integrator` method.
+
+    "vode"
+
+        Real-valued Variable-coefficient Ordinary Differential Equation
+        solver, with fixed-leading-coefficient implementation. It provides
+        implicit Adams method (for non-stiff problems) and a method based on
+        backward differentiation formulas (BDF) (for stiff problems).
+
+        Source: http://www.netlib.org/ode/vode.f
+
+        .. warning::
+
+           This integrator is not re-entrant. You cannot have two `ode`
+           instances using the "vode" integrator at the same time.
+
+        This integrator accepts the following parameters in `set_integrator`
+        method of the `ode` class:
+
+        - atol : float or sequence
+          absolute tolerance for solution
+        - rtol : float or sequence
+          relative tolerance for solution
+        - lband : None or int
+        - uband : None or int
+          Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
+          Setting these requires your jac routine to return the jacobian
+          in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
+          dimension of the matrix must be (lband+uband+1, len(y)).
+        - method: 'adams' or 'bdf'
+          Which solver to use, Adams (non-stiff) or BDF (stiff)
+        - with_jacobian : bool
+          This option is only considered when the user has not supplied a
+          Jacobian function and has not indicated (by setting either band)
+          that the Jacobian is banded. In this case, `with_jacobian` specifies
+          whether the iteration method of the ODE solver's correction step is
+          chord iteration with an internally generated full Jacobian or
+          functional iteration with no Jacobian.
+ - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - order : int + Maximum order used by the integrator, + order <= 12 for Adams, <= 5 for BDF. + + "zvode" + + Complex-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/zvode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "zvode" integrator at the same time. + + This integrator accepts the same parameters in `set_integrator` + as the "vode" solver. + + .. note:: + + When using ZVODE for a stiff system, it should only be used for + the case in which the function f is analytic, that is, when each f(i) + is an analytic function of each y(j). Analyticity means that the + partial derivative df(i)/dy(j) is a unique complex number, and this + fact is critical in the way ZVODE solves the dense or banded linear + systems that arise in the stiff case. For a complex stiff ODE system + in which f is not analytic, ZVODE is likely to have convergence + failures, and for this problem one should instead use DVODE on the + equivalent real system (in the real and imaginary parts of y). + + "lsoda" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + automatic method switching between implicit Adams method (for non-stiff + problems) and a method based on backward differentiation formulas (BDF) + (for stiff problems). + + Source: http://www.netlib.org/odepack + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "lsoda" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. + Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. + - with_jacobian : bool + *Not used.* + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - max_order_ns : int + Maximum order used in the nonstiff case (default 12). + - max_order_s : int + Maximum order used in the stiff case (default 5). + - max_hnil : int + Maximum number of messages reporting too small step size (t + h = t) + (default 0) + - ixpr : int + Whether to generate extra printing at method switches (default False). + + "dopri5" + + This is an explicit runge-kutta method of order (4)5 due to Dormand & + Prince (with stepsize control and dense output). + + Authors: + + E. Hairer and G. Wanner + Universite de Geneve, Dept. de Mathematiques + CH-1211 Geneve 24, Switzerland + e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch + + This code is described in [HNW93]_. 
+ + This integrator accepts the following parameters in set_integrator() + method of the ode class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - max_step : float + - safety : float + Safety factor on new step selection (default 0.9) + - ifactor : float + - dfactor : float + Maximum factor to increase/decrease step size by in one step + - beta : float + Beta parameter for stabilised step size control. + - verbosity : int + Switch for printing messages (< 0 for no messages). + + "dop853" + + This is an explicit runge-kutta method of order 8(5,3) due to Dormand + & Prince (with stepsize control and dense output). + + Options and references the same as "dopri5". + + Examples + -------- + + A problem to integrate and the corresponding jacobian: + + >>> from scipy.integrate import ode + >>> + >>> y0, t0 = [1.0j, 2.0], 0 + >>> + >>> def f(t, y, arg1): + ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2] + >>> def jac(t, y, arg1): + ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]] + + The integration: + + >>> r = ode(f, jac).set_integrator('zvode', method='bdf') + >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0) + >>> t1 = 10 + >>> dt = 1 + >>> while r.successful() and r.t < t1: + ... print(r.t+dt, r.integrate(r.t+dt)) + 1 [-0.71038232+0.23749653j 0.40000271+0.j ] + 2.0 [0.19098503-0.52359246j 0.22222356+0.j ] + 3.0 [0.47153208+0.52701229j 0.15384681+0.j ] + 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ] + 5.0 [0.02340997-0.61418799j 0.09523835+0.j ] + 6.0 [0.58643071+0.339819j 0.08000018+0.j ] + 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ] + 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ] + 9.0 [0.64850462+0.15048982j 0.05405414+0.j ] + 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ] + + References + ---------- + .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary + Differential Equations i. Nonstiff Problems. 2nd edition. + Springer Series in Computational Mathematics, + Springer-Verlag (1993) + + """ + + def __init__(self, f, jac=None): + self.stiff = 0 + self.f = f + self.jac = jac + self.f_params = () + self.jac_params = () + self._y = [] + + @property + def y(self): + return self._y + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + if isscalar(y): + y = [y] + n_prev = len(self._y) + if not n_prev: + self.set_integrator('') # find first available integrator + self._y = asarray(y, self._integrator.scalar) + self.t = t + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator. + **integrator_params + Additional parameters for the integrator. + """ + integrator = find_integrator(name) + if integrator is None: + # FIXME: this really should be raise an exception. Will that break + # any code? + message = f'No integrator name match with {name!r} or is not available.' + warnings.warn(message, stacklevel=2) + else: + self._integrator = integrator(**integrator_params) + if not len(self._y): + self.t = 0.0 + self._y = array([0.0], self._integrator.scalar) + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. 
+ + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + if step and self._integrator.supports_step: + mth = self._integrator.step + elif relax and self._integrator.supports_run_relax: + mth = self._integrator.run_relax + else: + mth = self._integrator.run + + try: + self._y, self.t = mth(self.f, self.jac or (lambda: None), + self._y, self.t, t, + self.f_params, self.jac_params) + except SystemError as e: + # f2py issue with tuple returns, see ticket 1187. + raise ValueError( + 'Function to integrate must not return a tuple.' + ) from e + + return self._y + + def successful(self): + """Check if integration was successful.""" + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.success == 1 + + def get_return_code(self): + """Extracts the return code for the integration to enable better control + if the integration fails. + + In general, a return code > 0 implies success, while a return code < 0 + implies failure. + + Notes + ----- + This section describes possible return codes and their meaning, for available + integrators that can be selected by `set_integrator` method. + + "vode" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call. (Perhaps wrong MF.) + -2 Excess accuracy requested. (Tolerances too small.) + -3 Illegal input detected. (See printed message.) + -4 Repeated error test failures. (Check all input.) + -5 Repeated convergence failures. (Perhaps bad Jacobian + supplied or wrong choice of MF or tolerances.) + -6 Error weight became zero during problem. (Solution + component i vanished, and ATOL or ATOL(i) = 0.) + =========== ======= + + "zvode" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call. (Perhaps wrong MF.) + -2 Excess accuracy requested. (Tolerances too small.) + -3 Illegal input detected. (See printed message.) + -4 Repeated error test failures. (Check all input.) + -5 Repeated convergence failures. (Perhaps bad Jacobian + supplied or wrong choice of MF or tolerances.) + -6 Error weight became zero during problem. (Solution + component i vanished, and ATOL or ATOL(i) = 0.) + =========== ======= + + "dopri5" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. + -4 Problem is probably stiff (interrupted). + =========== ======= + + "dop853" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. 
+ -4 Problem is probably stiff (interrupted). + =========== ======= + + "lsoda" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call (perhaps wrong Dfun type). + -2 Excess accuracy requested (tolerances too small). + -3 Illegal input detected (internal error). + -4 Repeated error test failures (internal error). + -5 Repeated convergence failures (perhaps bad Jacobian or tolerances). + -6 Error weight became zero during problem. + -7 Internal workspace insufficient to finish (internal error). + =========== ======= + """ + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.istate + + def set_f_params(self, *args): + """Set extra parameters for user-supplied function f.""" + self.f_params = args + return self + + def set_jac_params(self, *args): + """Set extra parameters for user-supplied function jac.""" + self.jac_params = args + return self + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. + + Parameters + ---------- + solout : callable + ``solout(t, y)`` is called at each internal integrator step, + t is a scalar providing the current independent position + y is the current solution ``y.shape == (n,)`` + solout should return -1 to stop integration + otherwise it should return None or 0 + + """ + if self._integrator.supports_solout: + self._integrator.set_solout(solout) + if self._y is not None: + self._integrator.reset(len(self._y), self.jac is not None) + else: + raise ValueError("selected integrator does not support solout," + " choose another one") + + +def _transform_banded_jac(bjac): + """ + Convert a real matrix of the form (for example) + + [0 0 A B] [0 0 0 B] + [0 0 C D] [0 0 A D] + [E F G H] to [0 F C H] + [I J K L] [E J G L] + [I 0 K 0] + + That is, every other column is shifted up one. + """ + # Shift every other column. + newjac = zeros((bjac.shape[0] + 1, bjac.shape[1])) + newjac[1:, ::2] = bjac[:, ::2] + newjac[:-1, 1::2] = bjac[:, 1::2] + return newjac + + +class complex_ode(ode): + """ + A wrapper of ode for complex systems. + + This functions similarly as `ode`, but re-maps a complex-valued + equation system to a real-valued one before using the integrators. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Rhs of the equation. t is a scalar, ``y.shape == (n,)``. + ``f_args`` is set by calling ``set_f_params(*args)``. + jac : callable ``jac(t, y, *jac_args)`` + Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``. + ``jac_args`` is set by calling ``set_f_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + Examples + -------- + For usage examples, see `ode`. + + """ + + def __init__(self, f, jac=None): + self.cf = f + self.cjac = jac + if jac is None: + ode.__init__(self, self._wrap, None) + else: + ode.__init__(self, self._wrap, self._wrap_jac) + + def _wrap(self, t, y, *f_args): + f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args)) + # self.tmp is a real-valued array containing the interleaved + # real and imaginary parts of f. + self.tmp[::2] = real(f) + self.tmp[1::2] = imag(f) + return self.tmp + + def _wrap_jac(self, t, y, *jac_args): + # jac is the complex Jacobian computed by the user-defined function. + jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args)) + + # jac_tmp is the real version of the complex Jacobian. 
Each complex + # entry in jac, say 2+3j, becomes a 2x2 block of the form + # [2 -3] + # [3 2] + jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1])) + jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac) + jac_tmp[1::2, ::2] = imag(jac) + jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2] + + ml = getattr(self._integrator, 'ml', None) + mu = getattr(self._integrator, 'mu', None) + if ml is not None or mu is not None: + # Jacobian is banded. The user's Jacobian function has computed + # the complex Jacobian in packed format. The corresponding + # real-valued version has every other column shifted up. + jac_tmp = _transform_banded_jac(jac_tmp) + + return jac_tmp + + @property + def y(self): + return self._y[::2] + 1j * self._y[1::2] + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator + **integrator_params + Additional parameters for the integrator. + """ + if name == 'zvode': + raise ValueError("zvode must be used with ode, not complex_ode") + + lband = integrator_params.get('lband') + uband = integrator_params.get('uband') + if lband is not None or uband is not None: + # The Jacobian is banded. Override the user-supplied bandwidths + # (which are for the complex Jacobian) with the bandwidths of + # the corresponding real-valued Jacobian wrapper of the complex + # Jacobian. + integrator_params['lband'] = 2 * (lband or 0) + 1 + integrator_params['uband'] = 2 * (uband or 0) + 1 + + return ode.set_integrator(self, name, **integrator_params) + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + y = asarray(y) + self.tmp = zeros(y.size * 2, 'float') + self.tmp[::2] = real(y) + self.tmp[1::2] = imag(y) + return ode.set_initial_value(self, self.tmp, t) + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. + + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + y = ode.integrate(self, t, step, relax) + return y[::2] + 1j * y[1::2] + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. 
+
+        Parameters
+        ----------
+        solout : callable
+            ``solout(t, y)`` is called at each internal integrator step,
+            t is a scalar providing the current independent position
+            y is the current solution ``y.shape == (n,)``
+            solout should return -1 to stop integration
+            otherwise it should return None or 0
+
+        """
+        if self._integrator.supports_solout:
+            self._integrator.set_solout(solout, complex=True)
+        else:
+            raise TypeError("selected integrator does not support solout, "
+                            "choose another one")
+
+
+# ------------------------------------------------------------------------------
+# ODE integrators
+# ------------------------------------------------------------------------------
+
+def find_integrator(name):
+    for cl in IntegratorBase.integrator_classes:
+        if re.match(name, cl.__name__, re.I):
+            return cl
+    return None
+
+
+class IntegratorConcurrencyError(RuntimeError):
+    """
+    Failure due to concurrent usage of an integrator that can be used
+    only for a single problem at a time.
+
+    """
+
+    def __init__(self, name):
+        msg = ("Integrator `%s` can be used to solve only a single problem "
+               "at a time. If you want to integrate multiple problems, "
+               "consider using a different integrator "
+               "(see `ode.set_integrator`)") % name
+        RuntimeError.__init__(self, msg)
+
+
+class IntegratorBase:
+    runner = None  # runner is None => integrator is not available
+    success = None  # success==1 if integrator was called successfully
+    istate = None  # istate > 0 means success, istate < 0 means failure
+    supports_run_relax = None
+    supports_step = None
+    supports_solout = False
+    integrator_classes = []
+    scalar = float
+
+    def acquire_new_handle(self):
+        # Some of the integrators have internal state (ancient
+        # Fortran...), and so only one instance can use them at a time.
+        # We keep track of this, and fail when concurrent usage is tried.
+        self.__class__.active_global_handle += 1
+        self.handle = self.__class__.active_global_handle
+
+    def check_handle(self):
+        if self.handle is not self.__class__.active_global_handle:
+            raise IntegratorConcurrencyError(self.__class__.__name__)
+
+    def reset(self, n, has_jac):
+        """Prepare integrator for call: allocate memory, set flags, etc.
+        n - number of equations.
+        has_jac - if user has supplied function for evaluating Jacobian.
+        """
+
+    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Integrate from t=t0 to t=t1 using y0 as an initial condition.
+        Return 2-tuple (y1,t1) where y1 is the result and t=t1
+        defines the stoppage coordinate of the result.
+        """
+        raise NotImplementedError('all integrators must define '
+                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')
+
+    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Make one integration step and return (y1,t1)."""
+        raise NotImplementedError('%s does not support step() method' %
+                                  self.__class__.__name__)
+
+    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
+        raise NotImplementedError('%s does not support run_relax() method' %
+                                  self.__class__.__name__)
+
+    # XXX: __str__ method for getting visual state of the integrator
+
+
+def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
+    """
+    Wrap a banded Jacobian function with a function that pads
+    the Jacobian with `ml` rows of zeros.
+ """ + + def jac_wrapper(t, y): + jac = asarray(jacfunc(t, y, *jac_params)) + padded_jac = vstack((jac, zeros((ml, jac.shape[1])))) + return padded_jac + + return jac_wrapper + + +class vode(IntegratorBase): + runner = getattr(_vode, 'dvode', None) + + messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)', + -2: 'Excess accuracy requested. (Tolerances too small.)', + -3: 'Illegal input detected. (See printed message.)', + -4: 'Repeated error test failures. (Check all input.)', + -5: 'Repeated convergence failures. (Perhaps bad' + ' Jacobian supplied or wrong choice of MF or tolerances.)', + -6: 'Error weight became zero during problem. (Solution' + ' component i vanished, and ATOL or ATOL(i) = 0.)' + } + supports_run_relax = 1 + supports_step = 1 + active_global_handle = 0 + + def __init__(self, + method='adams', + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + order=12, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ): + + if re.match(method, r'adams', re.I): + self.meth = 1 + elif re.match(method, r'bdf', re.I): + self.meth = 2 + else: + raise ValueError('Unknown integration method %s' % method) + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.order = order + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.success = 1 + + self.initialized = False + + def _determine_mf_and_set_bands(self, has_jac): + """ + Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`. + + In the Fortran code, the legal values of `MF` are: + 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, + -11, -12, -14, -15, -21, -22, -24, -25 + but this Python wrapper does not use negative values. + + Returns + + mf = 10*self.meth + miter + + self.meth is the linear multistep method: + self.meth == 1: method="adams" + self.meth == 2: method="bdf" + + miter is the correction iteration method: + miter == 0: Functional iteration; no Jacobian involved. + miter == 1: Chord iteration with user-supplied full Jacobian. + miter == 2: Chord iteration with internally computed full Jacobian. + miter == 3: Chord iteration with internally computed diagonal Jacobian. + miter == 4: Chord iteration with user-supplied banded Jacobian. + miter == 5: Chord iteration with internally computed banded Jacobian. + + Side effects: If either self.mu or self.ml is not None and the other is None, + then the one that is None is set to 0. + """ + + jac_is_banded = self.mu is not None or self.ml is not None + if jac_is_banded: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + + # has_jac is True if the user provided a Jacobian function. + if has_jac: + if jac_is_banded: + miter = 4 + else: + miter = 1 + else: + if jac_is_banded: + if self.ml == self.mu == 0: + miter = 3 # Chord iteration with internal diagonal Jacobian. + else: + miter = 5 # Chord iteration with internal banded Jacobian. + else: + # self.with_jacobian is set by the user in + # the call to ode.set_integrator. + if self.with_jacobian: + miter = 2 # Chord iteration with internal full Jacobian. + else: + miter = 0 # Functional iteration; no Jacobian involved. 
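+        # Worked example of the encoding below: method='adams' (meth=1) with
+        # an internally generated full Jacobian (miter=2) gives
+        # MF = 10 * 1 + 2 = 12.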
+ + mf = 10 * self.meth + miter + return mf + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf == 10: + lrw = 20 + 16 * n + elif mf in [11, 12]: + lrw = 22 + 16 * n + 2 * n * n + elif mf == 13: + lrw = 22 + 17 * n + elif mf in [14, 15]: + lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n + elif mf == 20: + lrw = 20 + 9 * n + elif mf in [21, 22]: + lrw = 22 + 9 * n + 2 * n * n + elif mf == 23: + lrw = 22 + 10 * n + elif mf in [24, 25]: + lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n + else: + raise ValueError('Unexpected mf=%s' % mf) + + if mf % 10 in [0, 3]: + liw = 30 + else: + liw = 30 + n + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), _vode_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + + if self.ml is not None and self.ml > 0: + # Banded Jacobian. Wrap the user-provided function with one + # that pads the Jacobian array with the extra `self.ml` rows + # required by the f2py-generated wrapper. + jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params) + + args = ((f, jac, y0, t0, t1) + tuple(self.call_args) + + (f_params, jac_params)) + y1, t, istate = self.runner(*args) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg)), + stacklevel=2) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if vode.runner is not None: + IntegratorBase.integrator_classes.append(vode) + + +class zvode(vode): + runner = getattr(_vode, 'zvode', None) + + supports_run_relax = 1 + supports_step = 1 + scalar = complex + active_global_handle = 0 + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf in (10,): + lzw = 15 * n + elif mf in (11, 12): + lzw = 15 * n + 2 * n ** 2 + elif mf in (-11, -12): + lzw = 15 * n + n ** 2 + elif mf in (13,): + lzw = 16 * n + elif mf in (14, 15): + lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-14, -15): + lzw = 16 * n + (2 * self.ml + self.mu) * n + elif mf in (20,): + lzw = 8 * n + elif mf in (21, 22): + lzw = 8 * n + 2 * n ** 2 + elif mf in (-21, -22): + lzw = 8 * n + n ** 2 + elif mf in (23,): + lzw = 9 * n + elif mf in (24, 25): + lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-24, -25): + lzw = 9 * n + (2 * self.ml + self.mu) * n + + lrw = 20 + n + + if mf % 10 in (0, 3): + liw = 30 + else: + liw = 30 + n + + zwork = zeros((lzw,), complex) + self.zwork = zwork + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = 
zeros((liw,), _vode_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.zwork, self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + +if zvode.runner is not None: + IntegratorBase.integrator_classes.append(zvode) + + +class dopri5(IntegratorBase): + runner = getattr(_dop, 'dopri5', None) + name = 'dopri5' + supports_solout = True + + messages = {1: 'computation successful', + 2: 'computation successful (interrupted by solout)', + -1: 'input is not consistent', + -2: 'larger nsteps is needed', + -3: 'step size becomes too small', + -4: 'problem is probably stiff (interrupted)', + } + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=10.0, + dfactor=0.2, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + self.rtol = rtol + self.atol = atol + self.nsteps = nsteps + self.max_step = max_step + self.first_step = first_step + self.safety = safety + self.ifactor = ifactor + self.dfactor = dfactor + self.beta = beta + self.verbosity = verbosity + self.success = 1 + self.set_solout(None) + + def set_solout(self, solout, complex=False): + self.solout = solout + self.solout_cmplx = complex + if solout is None: + self.iout = 0 + else: + self.iout = 1 + + def reset(self, n, has_jac): + work = zeros((8 * n + 21,), float) + work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), _dop_int_dtype) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + x, y, iwork, istate = self.runner(*((f, t0, y0, t1) + + tuple(self.call_args) + (f_params,))) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg)), + stacklevel=2) + self.success = 0 + return y, x + + def _solout(self, nr, xold, x, y, nd, icomp, con): + if self.solout is not None: + if self.solout_cmplx: + y = y[::2] + 1j * y[1::2] + return self.solout(x, y) + else: + return 1 + + +if dopri5.runner is not None: + IntegratorBase.integrator_classes.append(dopri5) + + +class dop853(dopri5): + runner = getattr(_dop, 'dop853', None) + name = 'dop853' + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=6.0, + dfactor=0.3, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + super().__init__(rtol, atol, nsteps, max_step, first_step, safety, + ifactor, dfactor, beta, method, verbosity) + + def reset(self, n, has_jac): + work = zeros((11 * n + 21,), float) + work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), _dop_int_dtype) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + 
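These integrator classes are not meant to be instantiated directly; they register themselves with `IntegratorBase` and are selected by name through the public `scipy.integrate.ode` interface. A minimal sketch of driving the `dop853` integrator that way (the right-hand side `y' = -0.5*y`, the tolerances, and the step loop are illustrative choices, not defaults mandated by this file):

    from scipy.integrate import ode

    def rhs(t, y):
        # The ode class passes (t, y); note this is the reverse of odeint's
        # default (y, t) ordering documented further below.
        return -0.5 * y

    r = ode(rhs).set_integrator('dop853', rtol=1e-8, atol=1e-12, nsteps=500)
    r.set_initial_value([1.0], 0.0)
    while r.successful() and r.t < 2.0:
        r.integrate(r.t + 0.5)
        print(r.t, r.y)
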
+if dop853.runner is not None: + IntegratorBase.integrator_classes.append(dop853) + + +class lsoda(IntegratorBase): + runner = getattr(_lsoda, 'lsoda', None) + active_global_handle = 0 + + messages = { + 2: "Integration successful.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error)." + } + + def __init__(self, + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ixpr=0, + max_hnil=0, + max_order_ns=12, + max_order_s=5, + method=None + ): + + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.max_order_ns = max_order_ns + self.max_order_s = max_order_s + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.ixpr = ixpr + self.max_hnil = max_hnil + self.success = 1 + + self.initialized = False + + def reset(self, n, has_jac): + # Calculate parameters for Fortran subroutine dvode. + if has_jac: + if self.mu is None and self.ml is None: + jt = 1 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 4 + else: + if self.mu is None and self.ml is None: + jt = 2 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 5 + lrn = 20 + (self.max_order_ns + 4) * n + if jt in [1, 2]: + lrs = 22 + (self.max_order_s + 4) * n + n * n + elif jt in [4, 5]: + lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n + else: + raise ValueError('Unexpected jt=%s' % jt) + lrw = max(lrn, lrs) + liw = 20 + n + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + iwork = zeros((liw,), _lsoda_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.ixpr + iwork[5] = self.nsteps + iwork[6] = self.max_hnil + iwork[7] = self.max_order_ns + iwork[8] = self.max_order_s + self.iwork = iwork + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, jt] + self.success = 1 + self.initialized = False + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + args = [f, y0, t0, t1] + self.call_args[:-1] + \ + [jac, self.call_args[-1], f_params, 0, jac_params] + y1, t, istate = self.runner(*args) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg)), + stacklevel=2) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if lsoda.runner: + 
IntegratorBase.integrator_classes.append(lsoda) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..20993e5bb516c9edbcb19699fb43063caac1a19f --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py @@ -0,0 +1,266 @@ +# Author: Travis Oliphant + +__all__ = ['odeint', 'ODEintWarning'] + +import numpy as np +from . import _odepack +from copy import copy +import warnings + + +class ODEintWarning(Warning): + """Warning raised during the execution of `odeint`.""" + pass + + +_msgs = {2: "Integration successful.", + 1: "Nothing was done; the integration time was 0.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error).", + -8: "Run terminated (internal error)." + } + + +def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, + ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, + hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, + mxords=5, printmessg=0, tfirst=False): + """ + Integrate a system of ordinary differential equations. + + .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a + differential equation. + + Solve a system of ordinary differential equations using lsoda from the + FORTRAN library odepack. + + Solves the initial value problem for stiff or non-stiff systems + of first order ode-s:: + + dy/dt = func(y, t, ...) [or func(t, y, ...)] + + where y can be a vector. + + .. note:: By default, the required order of the first two arguments of + `func` are in the opposite order of the arguments in the system + definition function used by the `scipy.integrate.ode` class and + the function `scipy.integrate.solve_ivp`. To use a function with + the signature ``func(t, y, ...)``, the argument `tfirst` must be + set to ``True``. + + Parameters + ---------- + func : callable(y, t, ...) or callable(t, y, ...) + Computes the derivative of y at t. + If the signature is ``callable(t, y, ...)``, then the argument + `tfirst` must be set ``True``. + `func` must not modify the data in `y`, as it is a + view of the data used internally by the ODE solver. + y0 : array + Initial condition on y (can be a vector). + t : array + A sequence of time points for which to solve for y. The initial + value point should be the first element of this sequence. + This sequence must be monotonically increasing or monotonically + decreasing; repeated values are allowed. + args : tuple, optional + Extra arguments to pass to function. + Dfun : callable(y, t, ...) or callable(t, y, ...) + Gradient (Jacobian) of `func`. + If the signature is ``callable(t, y, ...)``, then the argument + `tfirst` must be set ``True``. + `Dfun` must not modify the data in `y`, as it is a + view of the data used internally by the ODE solver. + col_deriv : bool, optional + True if `Dfun` defines derivatives down columns (faster), + otherwise `Dfun` should define derivatives across rows. 
+ full_output : bool, optional + True if to return a dictionary of optional outputs as the second output + printmessg : bool, optional + Whether to print the convergence message + tfirst : bool, optional + If True, the first two arguments of `func` (and `Dfun`, if given) + must ``t, y`` instead of the default ``y, t``. + + .. versionadded:: 1.1.0 + + Returns + ------- + y : array, shape (len(t), len(y0)) + Array containing the value of y for each desired time in t, + with the initial value `y0` in the first row. + infodict : dict, only returned if full_output == True + Dictionary containing additional output information + + ======= ============================================================ + key meaning + ======= ============================================================ + 'hu' vector of step sizes successfully used for each time step + 'tcur' vector with the value of t reached for each time step + (will always be at least as large as the input times) + 'tolsf' vector of tolerance scale factors, greater than 1.0, + computed when a request for too much accuracy was detected + 'tsw' value of t at the time of the last method switch + (given for each time step) + 'nst' cumulative number of time steps + 'nfe' cumulative number of function evaluations for each time step + 'nje' cumulative number of jacobian evaluations for each time step + 'nqu' a vector of method orders for each successful step + 'imxer' index of the component of largest magnitude in the + weighted local error vector (e / ewt) on an error return, -1 + otherwise + 'lenrw' the length of the double work array required + 'leniw' the length of integer work array required + 'mused' a vector of method indicators for each successful time step: + 1: adams (nonstiff), 2: bdf (stiff) + ======= ============================================================ + + Other Parameters + ---------------- + ml, mu : int, optional + If either of these are not None or non-negative, then the + Jacobian is assumed to be banded. These give the number of + lower and upper non-zero diagonals in this banded matrix. + For the banded case, `Dfun` should return a matrix whose + rows contain the non-zero bands (starting with the lowest diagonal). + Thus, the return matrix `jac` from `Dfun` should have shape + ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``. + The data in `jac` must be stored such that ``jac[i - j + mu, j]`` + holds the derivative of the ``i``\\ th equation with respect to the + ``j``\\ th state variable. If `col_deriv` is True, the transpose of + this `jac` must be returned. + rtol, atol : float, optional + The input parameters `rtol` and `atol` determine the error + control performed by the solver. The solver will control the + vector, e, of estimated local errors in y, according to an + inequality of the form ``max-norm of (e / ewt) <= 1``, + where ewt is a vector of positive error weights computed as + ``ewt = rtol * abs(y) + atol``. + rtol and atol can be either vectors the same length as y or scalars. + Defaults to 1.49012e-8. + tcrit : ndarray, optional + Vector of critical points (e.g., singularities) where integration + care should be taken. + h0 : float, (0: solver-determined), optional + The step size to be attempted on the first step. + hmax : float, (0: solver-determined), optional + The maximum absolute step size allowed. + hmin : float, (0: solver-determined), optional + The minimum absolute step size allowed. + ixpr : bool, optional + Whether to generate extra printing at method switches. 
+ mxstep : int, (0: solver-determined), optional + Maximum number of (internally defined) steps allowed for each + integration point in t. + mxhnil : int, (0: solver-determined), optional + Maximum number of messages printed. + mxordn : int, (0: solver-determined), optional + Maximum order to be allowed for the non-stiff (Adams) method. + mxords : int, (0: solver-determined), optional + Maximum order to be allowed for the stiff (BDF) method. + + See Also + -------- + solve_ivp : solve an initial value problem for a system of ODEs + ode : a more object-oriented integrator based on VODE + quad : for finding the area under a curve + + Examples + -------- + The second order differential equation for the angle `theta` of a + pendulum acted on by gravity with friction can be written:: + + theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0 + + where `b` and `c` are positive constants, and a prime (') denotes a + derivative. To solve this equation with `odeint`, we must first convert + it to a system of first order equations. By defining the angular + velocity ``omega(t) = theta'(t)``, we obtain the system:: + + theta'(t) = omega(t) + omega'(t) = -b*omega(t) - c*sin(theta(t)) + + Let `y` be the vector [`theta`, `omega`]. We implement this system + in Python as: + + >>> import numpy as np + >>> def pend(y, t, b, c): + ... theta, omega = y + ... dydt = [omega, -b*omega - c*np.sin(theta)] + ... return dydt + ... + + We assume the constants are `b` = 0.25 and `c` = 5.0: + + >>> b = 0.25 + >>> c = 5.0 + + For initial conditions, we assume the pendulum is nearly vertical + with `theta(0)` = `pi` - 0.1, and is initially at rest, so + `omega(0)` = 0. Then the vector of initial conditions is + + >>> y0 = [np.pi - 0.1, 0.0] + + We will generate a solution at 101 evenly spaced samples in the interval + 0 <= `t` <= 10. So our array of times is: + + >>> t = np.linspace(0, 10, 101) + + Call `odeint` to generate the solution. To pass the parameters + `b` and `c` to `pend`, we give them to `odeint` using the `args` + argument. + + >>> from scipy.integrate import odeint + >>> sol = odeint(pend, y0, t, args=(b, c)) + + The solution is an array with shape (101, 2). The first column + is `theta(t)`, and the second is `omega(t)`. The following code + plots both components. 
+ + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)') + >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)') + >>> plt.legend(loc='best') + >>> plt.xlabel('t') + >>> plt.grid() + >>> plt.show() + """ + + if ml is None: + ml = -1 # changed to zero inside function call + if mu is None: + mu = -1 # changed to zero inside function call + + dt = np.diff(t) + if not ((dt >= 0).all() or (dt <= 0).all()): + raise ValueError("The values in t must be monotonically increasing " + "or monotonically decreasing; repeated values are " + "allowed.") + + t = copy(t) + y0 = copy(y0) + output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu, + full_output, rtol, atol, tcrit, h0, hmax, hmin, + ixpr, mxstep, mxhnil, mxordn, mxords, + int(bool(tfirst))) + if output[-1] < 0: + warning_msg = (f"{_msgs[output[-1]]} Run with full_output = 1 to " + f"get quantitative information.") + warnings.warn(warning_msg, ODEintWarning, stacklevel=2) + elif printmessg: + warning_msg = _msgs[output[-1]] + warnings.warn(warning_msg, ODEintWarning, stacklevel=2) + + if full_output: + output[1]['message'] = _msgs[output[-1]] + + output = output[:-1] + if len(output) == 1: + return output[0] + else: + return output diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py new file mode 100644 index 0000000000000000000000000000000000000000..19218d196eb31a9df71baca485e898577911c871 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py @@ -0,0 +1,663 @@ +import sys +import copy +import heapq +import collections +import functools +import warnings + +import numpy as np + +from scipy._lib._util import MapWrapper, _FunctionWrapper + + +class LRUDict(collections.OrderedDict): + def __init__(self, max_size): + self.__max_size = max_size + + def __setitem__(self, key, value): + existing_key = (key in self) + super().__setitem__(key, value) + if existing_key: + self.move_to_end(key) + elif len(self) > self.__max_size: + self.popitem(last=False) + + def update(self, other): + # Not needed below + raise NotImplementedError() + + +class SemiInfiniteFunc: + """ + Argument transform from (start, +-oo) to (0, 1) + """ + def __init__(self, func, start, infty): + self._func = func + self._start = start + self._sgn = -1 if infty < 0 else 1 + + # Overflow threshold for the 1/t**2 factor + self._tmin = sys.float_info.min**0.5 + + def get_t(self, x): + z = self._sgn * (x - self._start) + 1 + if z == 0: + # Can happen only if point not in range + return np.inf + return 1 / z + + def __call__(self, t): + if t < self._tmin: + return 0.0 + else: + x = self._start + self._sgn * (1 - t) / t + f = self._func(x) + return self._sgn * (f / t) / t + + +class DoubleInfiniteFunc: + """ + Argument transform from (-oo, oo) to (-1, 1) + """ + def __init__(self, func): + self._func = func + + # Overflow threshold for the 1/t**2 factor + self._tmin = sys.float_info.min**0.5 + + def get_t(self, x): + s = -1 if x < 0 else 1 + return s / (abs(x) + 1) + + def __call__(self, t): + if abs(t) < self._tmin: + return 0.0 + else: + x = (1 - abs(t)) / t + f = self._func(x) + return (f / t) / t + + +def _max_norm(x): + return np.amax(abs(x)) + + +def _get_sizeof(obj): + try: + return sys.getsizeof(obj) + except TypeError: + # occurs on pypy + if hasattr(obj, '__sizeof__'): + return 
int(obj.__sizeof__()) + return 64 + + +class _Bunch: + def __init__(self, **kwargs): + self.__keys = kwargs.keys() + self.__dict__.update(**kwargs) + + def __repr__(self): + return "_Bunch({})".format(", ".join(f"{k}={repr(self.__dict__[k])}" + for k in self.__keys)) + + +def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, + limit=10000, workers=1, points=None, quadrature=None, full_output=False, + *, args=()): + r"""Adaptive integration of a vector-valued function. + + Parameters + ---------- + f : callable + Vector-valued function f(x) to integrate. + a : float + Initial point. + b : float + Final point. + epsabs : float, optional + Absolute tolerance. + epsrel : float, optional + Relative tolerance. + norm : {'max', '2'}, optional + Vector norm to use for error estimation. + cache_size : int, optional + Number of bytes to use for memoization. + limit : float or int, optional + An upper bound on the number of subintervals used in the adaptive + algorithm. + workers : int or map-like callable, optional + If `workers` is an integer, part of the computation is done in + parallel subdivided to this many tasks (using + :class:`python:multiprocessing.pool.Pool`). + Supply `-1` to use all cores available to the Process. + Alternatively, supply a map-like callable, such as + :meth:`python:multiprocessing.pool.Pool.map` for evaluating the + population in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + points : list, optional + List of additional breakpoints. + quadrature : {'gk21', 'gk15', 'trapezoid'}, optional + Quadrature rule to use on subintervals. + Options: 'gk21' (Gauss-Kronrod 21-point rule), + 'gk15' (Gauss-Kronrod 15-point rule), + 'trapezoid' (composite trapezoid rule). + Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite + full_output : bool, optional + Return an additional ``info`` dictionary. + args : tuple, optional + Extra arguments to pass to function, if any. + + .. versionadded:: 1.8.0 + + Returns + ------- + res : {float, array-like} + Estimate for the result + err : float + Error estimate for the result in the given norm + info : dict + Returned only when ``full_output=True``. + Info dictionary. Is an object with the attributes: + + success : bool + Whether integration reached target precision. + status : int + Indicator for convergence, success (0), + failure (1), and failure due to rounding error (2). + neval : int + Number of function evaluations. + intervals : ndarray, shape (num_intervals, 2) + Start and end points of subdivision intervals. + integrals : ndarray, shape (num_intervals, ...) + Integral for each interval. + Note that at most ``cache_size`` values are recorded, + and the array may contains *nan* for missing items. + errors : ndarray, shape (num_intervals,) + Estimated integration error for each interval. + + Notes + ----- + The algorithm mainly follows the implementation of QUADPACK's + DQAG* algorithms, implementing global error control and adaptive + subdivision. + + The algorithm here has some differences to the QUADPACK approach: + + Instead of subdividing one interval at a time, the algorithm + subdivides N intervals with largest errors at once. This enables + (partial) parallelization of the integration. + + The logic of subdividing "next largest" intervals first is then + not implemented, and we rely on the above extension to avoid + concentrating on "small" intervals only. + + The Wynn epsilon table extrapolation is not used (QUADPACK uses it + for infinite intervals). 
This is because the algorithm here is + supposed to work on vector-valued functions, in an user-specified + norm, and the extension of the epsilon algorithm to this case does + not appear to be widely agreed. For max-norm, using elementwise + Wynn epsilon could be possible, but we do not do this here with + the hope that the epsilon extrapolation is mainly useful in + special cases. + + References + ---------- + [1] R. Piessens, E. de Doncker, QUADPACK (1983). + + Examples + -------- + We can compute integrations of a vector-valued function: + + >>> from scipy.integrate import quad_vec + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> alpha = np.linspace(0.0, 2.0, num=30) + >>> f = lambda x: x**alpha + >>> x0, x1 = 0, 2 + >>> y, err = quad_vec(f, x0, x1) + >>> plt.plot(alpha, y) + >>> plt.xlabel(r"$\alpha$") + >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$") + >>> plt.show() + + """ + a = float(a) + b = float(b) + + if args: + if not isinstance(args, tuple): + args = (args,) + + # create a wrapped function to allow the use of map and Pool.map + f = _FunctionWrapper(f, args) + + # Use simple transformations to deal with integrals over infinite + # intervals. + kwargs = dict(epsabs=epsabs, + epsrel=epsrel, + norm=norm, + cache_size=cache_size, + limit=limit, + workers=workers, + points=points, + quadrature='gk15' if quadrature is None else quadrature, + full_output=full_output) + if np.isfinite(a) and np.isinf(b): + f2 = SemiInfiniteFunc(f, start=a, infty=b) + if points is not None: + kwargs['points'] = tuple(f2.get_t(xp) for xp in points) + return quad_vec(f2, 0, 1, **kwargs) + elif np.isfinite(b) and np.isinf(a): + f2 = SemiInfiniteFunc(f, start=b, infty=a) + if points is not None: + kwargs['points'] = tuple(f2.get_t(xp) for xp in points) + res = quad_vec(f2, 0, 1, **kwargs) + return (-res[0],) + res[1:] + elif np.isinf(a) and np.isinf(b): + sgn = -1 if b < a else 1 + + # NB. 
explicitly split integral at t=0, which separates + # the positive and negative sides + f2 = DoubleInfiniteFunc(f) + if points is not None: + kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points) + else: + kwargs['points'] = (0,) + + if a != b: + res = quad_vec(f2, -1, 1, **kwargs) + else: + res = quad_vec(f2, 1, 1, **kwargs) + + return (res[0]*sgn,) + res[1:] + elif not (np.isfinite(a) and np.isfinite(b)): + raise ValueError(f"invalid integration bounds a={a}, b={b}") + + norm_funcs = { + None: _max_norm, + 'max': _max_norm, + '2': np.linalg.norm + } + if callable(norm): + norm_func = norm + else: + norm_func = norm_funcs[norm] + + parallel_count = 128 + min_intervals = 2 + + try: + _quadrature = {None: _quadrature_gk21, + 'gk21': _quadrature_gk21, + 'gk15': _quadrature_gk15, + 'trapz': _quadrature_trapezoid, # alias for backcompat + 'trapezoid': _quadrature_trapezoid}[quadrature] + except KeyError as e: + raise ValueError(f"unknown quadrature {quadrature!r}") from e + + if quadrature == "trapz": + msg = ("`quadrature='trapz'` is deprecated in favour of " + "`quadrature='trapezoid' and will raise an error from SciPy 1.16.0 " + "onwards.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + # Initial interval set + if points is None: + initial_intervals = [(a, b)] + else: + prev = a + initial_intervals = [] + for p in sorted(points): + p = float(p) + if not (a < p < b) or p == prev: + continue + initial_intervals.append((prev, p)) + prev = p + initial_intervals.append((prev, b)) + + global_integral = None + global_error = None + rounding_error = None + interval_cache = None + intervals = [] + neval = 0 + + for x1, x2 in initial_intervals: + ig, err, rnd = _quadrature(x1, x2, f, norm_func) + neval += _quadrature.num_eval + + if global_integral is None: + if isinstance(ig, (float, complex)): + # Specialize for scalars + if norm_func in (_max_norm, np.linalg.norm): + norm_func = abs + + global_integral = ig + global_error = float(err) + rounding_error = float(rnd) + + cache_count = cache_size // _get_sizeof(ig) + interval_cache = LRUDict(cache_count) + else: + global_integral += ig + global_error += err + rounding_error += rnd + + interval_cache[(x1, x2)] = copy.copy(ig) + intervals.append((-err, x1, x2)) + + heapq.heapify(intervals) + + CONVERGED = 0 + NOT_CONVERGED = 1 + ROUNDING_ERROR = 2 + NOT_A_NUMBER = 3 + + status_msg = { + CONVERGED: "Target precision reached.", + NOT_CONVERGED: "Target precision not reached.", + ROUNDING_ERROR: "Target precision could not be reached due to rounding error.", + NOT_A_NUMBER: "Non-finite values encountered." 
+ } + + # Process intervals + with MapWrapper(workers) as mapwrapper: + ier = NOT_CONVERGED + + while intervals and len(intervals) < limit: + # Select intervals with largest errors for subdivision + tol = max(epsabs, epsrel*norm_func(global_integral)) + + to_process = [] + err_sum = 0 + + for j in range(parallel_count): + if not intervals: + break + + if j > 0 and err_sum > global_error - tol/8: + # avoid unnecessary parallel splitting + break + + interval = heapq.heappop(intervals) + + neg_old_err, a, b = interval + old_int = interval_cache.pop((a, b), None) + to_process.append( + ((-neg_old_err, a, b, old_int), f, norm_func, _quadrature) + ) + err_sum += -neg_old_err + + # Subdivide intervals + for parts in mapwrapper(_subdivide_interval, to_process): + dint, derr, dround_err, subint, dneval = parts + neval += dneval + global_integral += dint + global_error += derr + rounding_error += dround_err + for x in subint: + x1, x2, ig, err = x + interval_cache[(x1, x2)] = ig + heapq.heappush(intervals, (-err, x1, x2)) + + # Termination check + if len(intervals) >= min_intervals: + tol = max(epsabs, epsrel*norm_func(global_integral)) + if global_error < tol/8: + ier = CONVERGED + break + if global_error < rounding_error: + ier = ROUNDING_ERROR + break + + if not (np.isfinite(global_error) and np.isfinite(rounding_error)): + ier = NOT_A_NUMBER + break + + res = global_integral + err = global_error + rounding_error + + if full_output: + res_arr = np.asarray(res) + dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype) + integrals = np.array([interval_cache.get((z[1], z[2]), dummy) + for z in intervals], dtype=res_arr.dtype) + errors = np.array([-z[0] for z in intervals]) + intervals = np.array([[z[1], z[2]] for z in intervals]) + + info = _Bunch(neval=neval, + success=(ier == CONVERGED), + status=ier, + message=status_msg[ier], + intervals=intervals, + integrals=integrals, + errors=errors) + return (res, err, info) + else: + return (res, err) + + +def _subdivide_interval(args): + interval, f, norm_func, _quadrature = args + old_err, a, b, old_int = interval + + c = 0.5 * (a + b) + + # Left-hand side + if getattr(_quadrature, 'cache_size', 0) > 0: + f = functools.lru_cache(_quadrature.cache_size)(f) + + s1, err1, round1 = _quadrature(a, c, f, norm_func) + dneval = _quadrature.num_eval + s2, err2, round2 = _quadrature(c, b, f, norm_func) + dneval += _quadrature.num_eval + if old_int is None: + old_int, _, _ = _quadrature(a, b, f, norm_func) + dneval += _quadrature.num_eval + + if getattr(_quadrature, 'cache_size', 0) > 0: + dneval = f.cache_info().misses + + dint = s1 + s2 - old_int + derr = err1 + err2 - old_err + dround_err = round1 + round2 + + subintervals = ((a, c, s1, err1), (c, b, s2, err2)) + return dint, derr, dround_err, subintervals, dneval + + +def _quadrature_trapezoid(x1, x2, f, norm_func): + """ + Composite trapezoid quadrature + """ + x3 = 0.5*(x1 + x2) + f1 = f(x1) + f2 = f(x2) + f3 = f(x3) + + s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2) + + round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1)) + + 2*float(norm_func(f3)) + + float(norm_func(f2))) * 2e-16 + + s1 = 0.5 * (x2 - x1) * (f1 + f2) + err = 1/3 * float(norm_func(s1 - s2)) + return s2, err, round_err + + +_quadrature_trapezoid.cache_size = 3 * 3 +_quadrature_trapezoid.num_eval = 3 + + +def _quadrature_gk(a, b, f, norm_func, x, w, v): + """ + Generic Gauss-Kronrod quadrature + """ + + fv = [0.0]*len(x) + + c = 0.5 * (a + b) + h = 0.5 * (b - a) + + # Gauss-Kronrod + s_k = 0.0 + s_k_abs = 0.0 + for i in range(len(x)): + ff = 
f(c + h*x[i]) + fv[i] = ff + + vv = v[i] + + # \int f(x) + s_k += vv * ff + # \int |f(x)| + s_k_abs += vv * abs(ff) + + # Gauss + s_g = 0.0 + for i in range(len(w)): + s_g += w[i] * fv[2*i + 1] + + # Quadrature of abs-deviation from average + s_k_dabs = 0.0 + y0 = s_k / 2.0 + for i in range(len(x)): + # \int |f(x) - y0| + s_k_dabs += v[i] * abs(fv[i] - y0) + + # Use similar error estimation as quadpack + err = float(norm_func((s_k - s_g) * h)) + dabs = float(norm_func(s_k_dabs * h)) + if dabs != 0 and err != 0: + err = dabs * min(1.0, (200 * err / dabs)**1.5) + + eps = sys.float_info.epsilon + round_err = float(norm_func(50 * eps * h * s_k_abs)) + + if round_err > sys.float_info.min: + err = max(err, round_err) + + return h * s_k, err, round_err + + +def _quadrature_gk21(a, b, f, norm_func): + """ + Gauss-Kronrod 21 quadrature with error estimate + """ + # Gauss-Kronrod points + x = (0.995657163025808080735527280689003, + 0.973906528517171720077964012084452, + 0.930157491355708226001207180059508, + 0.865063366688984510732096688423493, + 0.780817726586416897063717578345042, + 0.679409568299024406234327365114874, + 0.562757134668604683339000099272694, + 0.433395394129247190799265943165784, + 0.294392862701460198131126603103866, + 0.148874338981631210884826001129720, + 0, + -0.148874338981631210884826001129720, + -0.294392862701460198131126603103866, + -0.433395394129247190799265943165784, + -0.562757134668604683339000099272694, + -0.679409568299024406234327365114874, + -0.780817726586416897063717578345042, + -0.865063366688984510732096688423493, + -0.930157491355708226001207180059508, + -0.973906528517171720077964012084452, + -0.995657163025808080735527280689003) + + # 10-point weights + w = (0.066671344308688137593568809893332, + 0.149451349150580593145776339657697, + 0.219086362515982043995534934228163, + 0.269266719309996355091226921569469, + 0.295524224714752870173892994651338, + 0.295524224714752870173892994651338, + 0.269266719309996355091226921569469, + 0.219086362515982043995534934228163, + 0.149451349150580593145776339657697, + 0.066671344308688137593568809893332) + + # 21-point weights + v = (0.011694638867371874278064396062192, + 0.032558162307964727478818972459390, + 0.054755896574351996031381300244580, + 0.075039674810919952767043140916190, + 0.093125454583697605535065465083366, + 0.109387158802297641899210590325805, + 0.123491976262065851077958109831074, + 0.134709217311473325928054001771707, + 0.142775938577060080797094273138717, + 0.147739104901338491374841515972068, + 0.149445554002916905664936468389821, + 0.147739104901338491374841515972068, + 0.142775938577060080797094273138717, + 0.134709217311473325928054001771707, + 0.123491976262065851077958109831074, + 0.109387158802297641899210590325805, + 0.093125454583697605535065465083366, + 0.075039674810919952767043140916190, + 0.054755896574351996031381300244580, + 0.032558162307964727478818972459390, + 0.011694638867371874278064396062192) + + return _quadrature_gk(a, b, f, norm_func, x, w, v) + + +_quadrature_gk21.num_eval = 21 + + +def _quadrature_gk15(a, b, f, norm_func): + """ + Gauss-Kronrod 15 quadrature with error estimate + """ + # Gauss-Kronrod points + x = (0.991455371120812639206854697526329, + 0.949107912342758524526189684047851, + 0.864864423359769072789712788640926, + 0.741531185599394439863864773280788, + 0.586087235467691130294144838258730, + 0.405845151377397166906606412076961, + 0.207784955007898467600689403773245, + 0.000000000000000000000000000000000, + -0.207784955007898467600689403773245, + 
-0.405845151377397166906606412076961, + -0.586087235467691130294144838258730, + -0.741531185599394439863864773280788, + -0.864864423359769072789712788640926, + -0.949107912342758524526189684047851, + -0.991455371120812639206854697526329) + + # 7-point weights + w = (0.129484966168869693270611432679082, + 0.279705391489276667901467771423780, + 0.381830050505118944950369775488975, + 0.417959183673469387755102040816327, + 0.381830050505118944950369775488975, + 0.279705391489276667901467771423780, + 0.129484966168869693270611432679082) + + # 15-point weights + v = (0.022935322010529224963732008058970, + 0.063092092629978553290700663189204, + 0.104790010322250183839876322541518, + 0.140653259715525918745189590510238, + 0.169004726639267902826583426598550, + 0.190350578064785409913256402421014, + 0.204432940075298892414161999234649, + 0.209482141084727828012999174891714, + 0.204432940075298892414161999234649, + 0.190350578064785409913256402421014, + 0.169004726639267902826583426598550, + 0.140653259715525918745189590510238, + 0.104790010322250183839876322541518, + 0.063092092629978553290700663189204, + 0.022935322010529224963732008058970) + + return _quadrature_gk(a, b, f, norm_func, x, w, v) + + +_quadrature_gk15.num_eval = 15 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..af7ed047c0c523608af4c36f1ae9ef71e9e0bfd3 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py @@ -0,0 +1,1279 @@ +# Author: Travis Oliphant 2001 +# Author: Nathan Woods 2013 (nquad &c) +import sys +import warnings +from functools import partial + +from . import _quadpack +import numpy as np + +__all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"] + + +class IntegrationWarning(UserWarning): + """ + Warning on issues during integration. + """ + pass + + +def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, + limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, + limlst=50, complex_func=False): + """ + Compute a definite integral. + + Integrate func from `a` to `b` (possibly infinite interval) using a + technique from the Fortran library QUADPACK. + + Parameters + ---------- + func : {function, scipy.LowLevelCallable} + A Python function or method to integrate. If `func` takes many + arguments, it is integrated along the axis corresponding to the + first argument. + + If the user desires improved integration performance, then `f` may + be a `scipy.LowLevelCallable` with one of the signatures:: + + double func(double x) + double func(double x, void *user_data) + double func(int n, double *xx) + double func(int n, double *xx, void *user_data) + + The ``user_data`` is the data contained in the `scipy.LowLevelCallable`. + In the call forms with ``xx``, ``n`` is the length of the ``xx`` + array which contains ``xx[0] == x`` and the rest of the items are + numbers contained in the ``args`` argument of quad. + + In addition, certain ctypes call signatures are supported for + backward compatibility, but those should not be used in new code. + a : float + Lower limit of integration (use -numpy.inf for -infinity). + b : float + Upper limit of integration (use numpy.inf for +infinity). + args : tuple, optional + Extra arguments to pass to `func`. 
+ full_output : int, optional + Non-zero to return a dictionary of integration information. + If non-zero, warning messages are also suppressed and the + message is appended to the output tuple. + complex_func : bool, optional + Indicate if the function's (`func`) return type is real + (``complex_func=False``: default) or complex (``complex_func=True``). + In both cases, the function's argument is real. + If full_output is also non-zero, the `infodict`, `message`, and + `explain` for the real and complex components are returned in + a dictionary with keys "real output" and "imag output". + + Returns + ------- + y : float + The integral of func from `a` to `b`. + abserr : float + An estimate of the absolute error in the result. + infodict : dict + A dictionary containing additional information. + message + A convergence message. + explain + Appended only with 'cos' or 'sin' weighting and infinite + integration limits, it contains an explanation of the codes in + infodict['ierlst'] + + Other Parameters + ---------------- + epsabs : float or int, optional + Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain + an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` + where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the + numerical approximation. See `epsrel` below. + epsrel : float or int, optional + Relative error tolerance. Default is 1.49e-8. + If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 + and ``50 * (machine epsilon)``. See `epsabs` above. + limit : float or int, optional + An upper bound on the number of subintervals used in the adaptive + algorithm. + points : (sequence of floats,ints), optional + A sequence of break points in the bounded integration interval + where local difficulties of the integrand may occur (e.g., + singularities, discontinuities). The sequence does not have + to be sorted. Note that this option cannot be used in conjunction + with ``weight``. + weight : float or int, optional + String indicating weighting function. Full explanation for this + and the remaining arguments can be found below. + wvar : optional + Variables for use with weighting functions. + wopts : optional + Optional input for reusing Chebyshev moments. + maxp1 : float or int, optional + An upper bound on the number of Chebyshev moments. + limlst : int, optional + Upper bound on the number of cycles (>=3) for use with a sinusoidal + weighting and an infinite end-point. + + See Also + -------- + dblquad : double integral + tplquad : triple integral + nquad : n-dimensional integrals (uses `quad` recursively) + fixed_quad : fixed-order Gaussian quadrature + simpson : integrator for sampled data + romb : integrator for sampled data + scipy.special : for coefficients and roots of orthogonal polynomials + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Extra information for quad() inputs and outputs** + + If full_output is non-zero, then the third output argument + (infodict) is a dictionary with entries as tabulated below. For + infinite limits, the range is transformed to (0,1) and the + optional outputs are given with respect to this transformed range. + Let M be the input argument limit and let K be infodict['last']. + The entries are: + + 'neval' + The number of function evaluations. + 'last' + The number, K, of subintervals produced in the subdivision process. 
+ 'alist' + A rank-1 array of length M, the first K elements of which are the + left end points of the subintervals in the partition of the + integration range. + 'blist' + A rank-1 array of length M, the first K elements of which are the + right end points of the subintervals. + 'rlist' + A rank-1 array of length M, the first K elements of which are the + integral approximations on the subintervals. + 'elist' + A rank-1 array of length M, the first K elements of which are the + moduli of the absolute error estimates on the subintervals. + 'iord' + A rank-1 integer array of length M, the first L elements of + which are pointers to the error estimates over the subintervals + with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the + sequence ``infodict['iord']`` and let E be the sequence + ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a + decreasing sequence. + + If the input argument points is provided (i.e., it is not None), + the following additional outputs are placed in the output + dictionary. Assume the points sequence is of length P. + + 'pts' + A rank-1 array of length P+2 containing the integration limits + and the break points of the intervals in ascending order. + This is an array giving the subintervals over which integration + will occur. + 'level' + A rank-1 integer array of length M (=limit), containing the + subdivision levels of the subintervals, i.e., if (aa,bb) is a + subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]`` + are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l + if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``. + 'ndin' + A rank-1 integer array of length P+2. After the first integration + over the intervals (pts[1], pts[2]), the error estimates over some + of the intervals may have been increased artificially in order to + put their subdivision forward. This array has ones in slots + corresponding to the subintervals for which this happens. + + **Weighting the integrand** + + The input variables, *weight* and *wvar*, are used to weight the + integrand by a select list of functions. Different integration + methods are used to compute the integral with these weighting + functions, and these do not support specifying break points. The + possible values of weight and the corresponding weighting functions are. + + ========== =================================== ===================== + ``weight`` Weight function used ``wvar`` + ========== =================================== ===================== + 'cos' cos(w*x) wvar = w + 'sin' sin(w*x) wvar = w + 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta) + 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta) + 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta) + 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta) + 'cauchy' 1/(x-c) wvar = c + ========== =================================== ===================== + + wvar holds the parameter w, (alpha, beta), or c depending on the weight + selected. In these expressions, a and b are the integration limits. + + For the 'cos' and 'sin' weighting, additional inputs and outputs are + available. + + For finite integration limits, the integration is performed using a + Clenshaw-Curtis method which uses Chebyshev moments. For repeated + calculations, these moments are saved in the output dictionary: + + 'momcom' + The maximum level of Chebyshev moments that have been computed, + i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been + computed for intervals of length ``|b-a| * 2**(-l)``, + ``l=0,1,...,M_c``. 
+ 'nnlog' + A rank-1 integer array of length M(=limit), containing the + subdivision levels of the subintervals, i.e., an element of this + array is equal to l if the corresponding subinterval is + ``|b-a|* 2**(-l)``. + 'chebmo' + A rank-2 array of shape (25, maxp1) containing the computed + Chebyshev moments. These can be passed on to an integration + over the same interval by passing this array as the second + element of the sequence wopts and passing infodict['momcom'] as + the first element. + + If one of the integration limits is infinite, then a Fourier integral is + computed (assuming w neq 0). If full_output is 1 and a numerical error + is encountered, besides the error message attached to the output tuple, + a dictionary is also appended to the output tuple which translates the + error codes in the array ``info['ierlst']`` to English messages. The + output information dictionary contains the following entries instead of + 'last', 'alist', 'blist', 'rlist', and 'elist': + + 'lst' + The number of subintervals needed for the integration (call it ``K_f``). + 'rslst' + A rank-1 array of length M_f=limlst, whose first ``K_f`` elements + contain the integral contribution over the interval + ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|`` + and ``k=1,2,...,K_f``. + 'erlst' + A rank-1 array of length ``M_f`` containing the error estimate + corresponding to the interval in the same position in + ``infodict['rslist']``. + 'ierlst' + A rank-1 integer array of length ``M_f`` containing an error flag + corresponding to the interval in the same position in + ``infodict['rslist']``. See the explanation dictionary (last entry + in the output tuple) for the meaning of the codes. + + + **Details of QUADPACK level routines** + + `quad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. The routine called depends on + `weight`, `points` and the integration limits `a` and `b`. + + ================ ============== ========== ===================== + QUADPACK routine `weight` `points` infinite bounds + ================ ============== ========== ===================== + qagse None No No + qagie None No Yes + qagpe None Yes No + qawoe 'sin', 'cos' No No + qawfe 'sin', 'cos' No either `a` or `b` + qawse 'alg*' No No + qawce 'cauchy' No No + ================ ============== ========== ===================== + + The following provides a short description from [1]_ for each + routine. + + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + qagpe + serves the same purposes as QAGS, but also allows the + user to provide explicit information about the location + and type of trouble-spots i.e. the abscissae of internal + singularities, discontinuities and other difficulties of + the integrand function. + qawoe + is an integrator for the evaluation of + :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or + :math:`\\int^b_a \\sin(\\omega x)f(x)dx` + over a finite interval [a,b], where :math:`\\omega` and :math:`f` + are specified by the user. 
The rule evaluation component is based + on the modified Clenshaw-Curtis technique + + An adaptive subdivision scheme is used in connection + with an extrapolation procedure, which is a modification + of that in ``QAGS`` and allows the algorithm to deal with + singularities in :math:`f(x)`. + qawfe + calculates the Fourier transform + :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or + :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx` + for user-provided :math:`\\omega` and :math:`f`. The procedure of + ``QAWO`` is applied on successive finite intervals, and convergence + acceleration by means of the :math:`\\varepsilon`-algorithm is applied + to the series of integral approximations. + qawse + approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where + :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with + :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the + following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`, + :math:`\\log(x-a)\\log(b-x)`. + + The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the + function :math:`v`. A globally adaptive subdivision strategy is + applied, with modified Clenshaw-Curtis integration on those + subintervals which contain `a` or `b`. + qawce + compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be + interpreted as a Cauchy principal value integral, for user specified + :math:`c` and :math:`f`. The strategy is globally adaptive. Modified + Clenshaw-Curtis integration is used on those intervals containing the + point :math:`x = c`. + + **Integration of Complex Function of a Real Variable** + + A complex valued function, :math:`f`, of a real variable can be written as + :math:`f = g + ih`. Similarly, the integral of :math:`f` can be + written as + + .. math:: + \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx + + assuming that the integrals of :math:`g` and :math:`h` exist + over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates + complex-valued functions by integrating the real and imaginary components + separately. + + + References + ---------- + + .. [1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + .. [2] McCullough, Thomas; Phillips, Keith (1973). + Foundations of Analysis in the Complex Plane. + Holt Rinehart Winston. + ISBN 0-03-086370-8 + + Examples + -------- + Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result + + >>> from scipy import integrate + >>> import numpy as np + >>> x2 = lambda x: x**2 + >>> integrate.quad(x2, 0, 4) + (21.333333333333332, 2.3684757858670003e-13) + >>> print(4**3 / 3.) 
# analytical result + 21.3333333333 + + Calculate :math:`\\int^\\infty_0 e^{-x} dx` + + >>> invexp = lambda x: np.exp(-x) + >>> integrate.quad(invexp, 0, np.inf) + (1.0, 5.842605999138044e-11) + + Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3` + + >>> f = lambda x, a: a*x + >>> y, err = integrate.quad(f, 0, 1, args=(1,)) + >>> y + 0.5 + >>> y, err = integrate.quad(f, 0, 1, args=(3,)) + >>> y + 1.5 + + Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding + y parameter as 1:: + + testlib.c => + double func(int n, double args[n]){ + return args[0]*args[0] + args[1]*args[1];} + compile to library testlib.* + + :: + + from scipy import integrate + import ctypes + lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path + lib.func.restype = ctypes.c_double + lib.func.argtypes = (ctypes.c_int,ctypes.c_double) + integrate.quad(lib.func,0,1,(1)) + #(1.3333333333333333, 1.4802973661668752e-14) + print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result + # 1.3333333333333333 + + Be aware that pulse shapes and other sharp features as compared to the + size of the integration interval may not be integrated correctly using + this method. A simplified example of this limitation is integrating a + y-axis reflected step function with many zero values within the integrals + bounds. + + >>> y = lambda x: 1 if x<=0 else 0 + >>> integrate.quad(y, -1, 1) + (1.0, 1.1102230246251565e-14) + >>> integrate.quad(y, -1, 100) + (1.0000000002199108, 1.0189464580163188e-08) + >>> integrate.quad(y, -1, 10000) + (0.0, 0.0) + + """ + if not isinstance(args, tuple): + args = (args,) + + # check the limits of integration: \int_a^b, expect a < b + flip, a, b = b < a, min(a, b), max(a, b) + + if complex_func: + def imfunc(x, *args): + return func(x, *args).imag + + def refunc(x, *args): + return func(x, *args).real + + re_retval = quad(refunc, a, b, args, full_output, epsabs, + epsrel, limit, points, weight, wvar, wopts, + maxp1, limlst, complex_func=False) + im_retval = quad(imfunc, a, b, args, full_output, epsabs, + epsrel, limit, points, weight, wvar, wopts, + maxp1, limlst, complex_func=False) + integral = re_retval[0] + 1j*im_retval[0] + error_estimate = re_retval[1] + 1j*im_retval[1] + retval = integral, error_estimate + if full_output: + msgexp = {} + msgexp["real"] = re_retval[2:] + msgexp["imag"] = im_retval[2:] + retval = retval + (msgexp,) + + return retval + + if weight is None: + retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit, + points) + else: + if points is not None: + msg = ("Break points cannot be specified when using weighted integrand.\n" + "Continuing, ignoring specified points.") + warnings.warn(msg, IntegrationWarning, stacklevel=2) + retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel, + limlst, limit, maxp1, weight, wvar, wopts) + + if flip: + retval = (-retval[0],) + retval[1:] + + ier = retval[-1] + if ier == 0: + return retval[:-1] + + msgs = {80: "A Python error occurred possibly while calling the function.", + 1: f"The maximum number of subdivisions ({limit}) has been achieved.\n " + f"If increasing the limit yields no improvement it is advised to " + f"analyze \n the integrand in order to determine the difficulties. " + f"If the position of a \n local difficulty can be determined " + f"(singularity, discontinuity) one will \n probably gain from " + f"splitting up the interval and calling the integrator \n on the " + f"subranges. 
Perhaps a special-purpose integrator should be used.", + 2: "The occurrence of roundoff error is detected, which prevents \n " + "the requested tolerance from being achieved. " + "The error may be \n underestimated.", + 3: "Extremely bad integrand behavior occurs at some points of the\n " + "integration interval.", + 4: "The algorithm does not converge. Roundoff error is detected\n " + "in the extrapolation table. It is assumed that the requested " + "tolerance\n cannot be achieved, and that the returned result " + "(if full_output = 1) is \n the best which can be obtained.", + 5: "The integral is probably divergent, or slowly convergent.", + 6: "The input is invalid.", + 7: "Abnormal termination of the routine. The estimates for result\n " + "and error are less reliable. It is assumed that the requested " + "accuracy\n has not been achieved.", + 'unknown': "Unknown error."} + + if weight in ['cos','sin'] and (b == np.inf or a == -np.inf): + msgs[1] = ( + "The maximum number of cycles allowed has been achieved., e.e.\n of " + "subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n " + "*pi/abs(omega), for k = 1, 2, ..., lst. " + "One can allow more cycles by increasing the value of limlst. " + "Look at info['ierlst'] with full_output=1." + ) + msgs[4] = ( + "The extrapolation table constructed for convergence acceleration\n of " + "the series formed by the integral contributions over the cycles, \n does " + "not converge to within the requested accuracy. " + "Look at \n info['ierlst'] with full_output=1." + ) + msgs[7] = ( + "Bad integrand behavior occurs within one or more of the cycles.\n " + "Location and type of the difficulty involved can be determined from \n " + "the vector info['ierlist'] obtained with full_output=1." + ) + explain = {1: "The maximum number of subdivisions (= limit) has been \n " + "achieved on this cycle.", + 2: "The occurrence of roundoff error is detected and prevents\n " + "the tolerance imposed on this cycle from being achieved.", + 3: "Extremely bad integrand behavior occurs at some points of\n " + "this cycle.", + 4: "The integral over this cycle does not converge (to within the " + "required accuracy) due to roundoff in the extrapolation " + "procedure invoked on this cycle. It is assumed that the result " + "on this interval is the best which can be obtained.", + 5: "The integral over this cycle is probably divergent or " + "slowly convergent."} + + try: + msg = msgs[ier] + except KeyError: + msg = msgs['unknown'] + + if ier in [1,2,3,4,5,7]: + if full_output: + if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf): + return retval[:-1] + (msg, explain) + else: + return retval[:-1] + (msg,) + else: + warnings.warn(msg, IntegrationWarning, stacklevel=2) + return retval[:-1] + + elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6 + if epsabs <= 0: # Small error tolerance - applies to all methods + if epsrel < max(50 * sys.float_info.epsilon, 5e-29): + msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both" + " 5e-29 and 50*(machine epsilon).") + elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf): + msg = ("Sine or cosine weighted integrals with infinite domain" + " must have 'epsabs'>0.") + + elif weight is None: + if points is None: # QAGSE/QAGIE + msg = ("Invalid 'limit' argument. 
There must be" + " at least one subinterval") + else: # QAGPE + if not (min(a, b) <= min(points) <= max(points) <= max(a, b)): + msg = ("All break points in 'points' must lie within the" + " integration limits.") + elif len(points) >= limit: + msg = (f"Number of break points ({len(points):d}) " + f"must be less than subinterval limit ({limit:d})") + + else: + if maxp1 < 1: + msg = "Chebyshev moment limit maxp1 must be >=1." + + elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE + msg = "Cycle limit limlst must be >=3." + + elif weight.startswith('alg'): # QAWSE + if min(wvar) < -1: + msg = "wvar parameters (alpha, beta) must both be >= -1." + if b < a: + msg = "Integration limits a, b must satistfy a>> import numpy as np + >>> from scipy import integrate + >>> f = lambda y, x: x*y**2 + >>> integrate.dblquad(f, 0, 2, 0, 1) + (0.6666666666666667, 7.401486830834377e-15) + + Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 + \\,dy \\,dx`. + + >>> f = lambda y, x: 1 + >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos) + (0.41421356237309503, 1.1083280054755938e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx` + for :math:`a=1, 3`. + + >>> f = lambda y, x, a: a*x*y + >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,)) + (0.33333333333333337, 5.551115123125783e-15) + >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,)) + (0.9999999999999999, 1.6653345369377348e-14) + + Compute the two-dimensional Gaussian Integral, which is the integral of the + Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over + :math:`(-\\infty,+\\infty)`. That is, compute the integral + :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`. + + >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2)) + >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf) + (3.141592653589777, 2.5173086737433208e-08) + + """ + + def temp_ranges(*args): + return [gfun(args[0]) if callable(gfun) else gfun, + hfun(args[0]) if callable(hfun) else hfun] + + return nquad(func, [temp_ranges, [a, b]], args=args, + opts={"epsabs": epsabs, "epsrel": epsrel}) + + +def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8, + epsrel=1.49e-8): + """ + Compute a triple (definite) integral. + + Return the triple integral of ``func(z, y, x)`` from ``x = a..b``, + ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``. + + Parameters + ---------- + func : function + A Python function or method of at least three variables in the + order (z, y, x). + a, b : float + The limits of integration in x: `a` < `b` + gfun : function or float + The lower boundary curve in y which is a function taking a single + floating point argument (x) and returning a floating point result + or a float indicating a constant boundary curve. + hfun : function or float + The upper boundary curve in y (same requirements as `gfun`). + qfun : function or float + The lower boundary surface in z. It must be a function that takes + two floats in the order (x, y) and returns a float or a float + indicating a constant boundary surface. + rfun : function or float + The upper boundary surface in z. (Same requirements as `qfun`.) + args : tuple, optional + Extra arguments to pass to `func`. + epsabs : float, optional + Absolute tolerance passed directly to the innermost 1-D quadrature + integration. Default is 1.49e-8. + epsrel : float, optional + Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8. 
+ + Returns + ------- + y : float + The resultant integral. + abserr : float + An estimate of the error. + + See Also + -------- + quad : Adaptive quadrature using QUADPACK + fixed_quad : Fixed-order Gaussian quadrature + dblquad : Double integrals + nquad : N-dimensional integrals + romb : Integrators for sampled data + simpson : Integrators for sampled data + scipy.special : For coefficients and roots of orthogonal polynomials + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Details of QUADPACK level routines** + + `quad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. For each level of integration, ``qagse`` + is used for finite limits or ``qagie`` is used, if either limit (or both!) + are infinite. The following provides a short description from [1]_ for each + routine. + + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + + References + ---------- + + .. [1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + Examples + -------- + Compute the triple integral of ``x * y * z``, over ``x`` ranging + from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1. + That is, :math:`\\int^{x=2}_{x=1} \\int^{y=3}_{y=2} \\int^{z=1}_{z=0} x y z + \\,dz \\,dy \\,dx`. + + >>> import numpy as np + >>> from scipy import integrate + >>> f = lambda z, y, x: x*y*z + >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1) + (1.8749999999999998, 3.3246447942574074e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1-2x}_{y=0} + \\int^{z=1-x-2y}_{z=0} x y z \\,dz \\,dy \\,dx`. + Note: `qfun`/`rfun` takes arguments in the order (x, y), even though ``f`` + takes arguments in the order (z, y, x). + + >>> f = lambda z, y, x: x*y*z + >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y) + (0.05416666666666668, 2.1774196738157757e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1}_{y=0} \\int^{z=1}_{z=0} + a x y z \\,dz \\,dy \\,dx` for :math:`a=1, 3`. + + >>> f = lambda z, y, x, a: a*x*y*z + >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(1,)) + (0.125, 5.527033708952211e-15) + >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(3,)) + (0.375, 1.6581101126856635e-14) + + Compute the three-dimensional Gaussian Integral, which is the integral of + the Gaussian function :math:`f(x,y,z) = e^{-(x^{2} + y^{2} + z^{2})}`, over + :math:`(-\\infty,+\\infty)`. That is, compute the integral + :math:`\\iiint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2} + z^{2})} \\,dz + \\,dy\\,dx`. + + >>> f = lambda x, y, z: np.exp(-(x ** 2 + y ** 2 + z ** 2)) + >>> integrate.tplquad(f, -np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf) + (5.568327996830833, 4.4619078828029765e-08) + + """ + # f(z, y, x) + # qfun/rfun(x, y) + # gfun/hfun(x) + # nquad will hand (y, x, t0, ...) to ranges0 + # nquad will hand (x, t0, ...) to ranges1 + # Only qfun / rfun is different API... 
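+    # Hedged illustration (comment only, not library code): with
+    # f = lambda z, y, x: ..., nquad integrates z innermost, so ranges0
+    # below is called as ranges0(y, x, *args) and must return the z-limits.
+    # Hence qfun(args[1], args[0]) == qfun(x, y), matching the (x, y)
+    # argument order documented for qfun/rfun above.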
+
+    def ranges0(*args):
+        return [qfun(args[1], args[0]) if callable(qfun) else qfun,
+                rfun(args[1], args[0]) if callable(rfun) else rfun]
+
+    def ranges1(*args):
+        return [gfun(args[0]) if callable(gfun) else gfun,
+                hfun(args[0]) if callable(hfun) else hfun]
+
+    ranges = [ranges0, ranges1, [a, b]]
+    return nquad(func, ranges, args=args,
+                 opts={"epsabs": epsabs, "epsrel": epsrel})
+
+
+def nquad(func, ranges, args=None, opts=None, full_output=False):
+    r"""
+    Integration over multiple variables.
+
+    Wraps `quad` to enable integration over multiple variables.
+    Various options allow improved integration of discontinuous functions, as
+    well as the use of weighted integration, and generally finer control of the
+    integration process.
+
+    Parameters
+    ----------
+    func : {callable, scipy.LowLevelCallable}
+        The function to be integrated. Has arguments of ``x0, ... xn``,
+        ``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
+        which must be floats. ``t0, ... tm`` are extra arguments passed
+        in `args`.
+        Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
+        Integration is carried out in order. That is, integration over ``x0``
+        is the innermost integral, and ``xn`` is the outermost.
+
+        If the user desires improved integration performance, then `func` may
+        be a `scipy.LowLevelCallable` with one of the signatures::
+
+            double func(int n, double *xx)
+            double func(int n, double *xx, void *user_data)
+
+        where ``n`` is the number of variables and args. The ``xx`` array
+        contains the coordinates and extra arguments. ``user_data`` is the data
+        contained in the `scipy.LowLevelCallable`.
+    ranges : iterable object
+        Each element of ranges may be either a sequence of 2 numbers, or else
+        a callable that returns such a sequence. ``ranges[0]`` corresponds to
+        integration over x0, and so on. If an element of ranges is a callable,
+        then it will be called with all of the integration arguments available,
+        as well as any parametric arguments. e.g., if
+        ``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
+        either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
+    args : iterable object, optional
+        Additional arguments ``t0, ... tm``, required by ``func``, ``ranges``,
+        and ``opts``.
+    opts : iterable object or dict, optional
+        Options to be passed to `quad`. May be empty, a dict, or
+        a sequence of dicts or functions that return a dict. If empty, the
+        default options from scipy.integrate.quad are used. If a dict, the same
+        options are used for all levels of integration. If a sequence, then each
+        element of the sequence corresponds to a particular integration. e.g.,
+        ``opts[0]`` corresponds to integration over ``x0``, and so on. If a
+        callable, the signature must be the same as for ``ranges``. The
+        available options together with their default values are:
+
+          - epsabs = 1.49e-08
+          - epsrel = 1.49e-08
+          - limit  = 50
+          - points = None
+          - weight = None
+          - wvar   = None
+          - wopts  = None
+
+        For more information on these options, see `quad`.
+
+    full_output : bool, optional
+        Partial implementation of ``full_output`` from scipy.integrate.quad.
+        The number of integrand function evaluations ``neval`` can be obtained
+        by setting ``full_output=True`` when calling nquad.
+
+    Returns
+    -------
+    result : float
+        The result of the integration.
+    abserr : float
+        The maximum of the estimates of the absolute error in the various
+        integration results.
+    out_dict : dict, optional
+        A dict containing additional information on the integration.
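+        At present this contains only ``neval``, the total number of
+        integrand evaluations performed in the innermost integration loop.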
+ + See Also + -------- + quad : 1-D numerical integration + dblquad, tplquad : double and triple integrals + fixed_quad : fixed-order Gaussian quadrature + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Details of QUADPACK level routines** + + `nquad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. The routine called depends on + `weight`, `points` and the integration limits `a` and `b`. + + ================ ============== ========== ===================== + QUADPACK routine `weight` `points` infinite bounds + ================ ============== ========== ===================== + qagse None No No + qagie None No Yes + qagpe None Yes No + qawoe 'sin', 'cos' No No + qawfe 'sin', 'cos' No either `a` or `b` + qawse 'alg*' No No + qawce 'cauchy' No No + ================ ============== ========== ===================== + + The following provides a short description from [1]_ for each + routine. + + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + qagpe + serves the same purposes as QAGS, but also allows the + user to provide explicit information about the location + and type of trouble-spots i.e. the abscissae of internal + singularities, discontinuities and other difficulties of + the integrand function. + qawoe + is an integrator for the evaluation of + :math:`\int^b_a \cos(\omega x)f(x)dx` or + :math:`\int^b_a \sin(\omega x)f(x)dx` + over a finite interval [a,b], where :math:`\omega` and :math:`f` + are specified by the user. The rule evaluation component is based + on the modified Clenshaw-Curtis technique + + An adaptive subdivision scheme is used in connection + with an extrapolation procedure, which is a modification + of that in ``QAGS`` and allows the algorithm to deal with + singularities in :math:`f(x)`. + qawfe + calculates the Fourier transform + :math:`\int^\infty_a \cos(\omega x)f(x)dx` or + :math:`\int^\infty_a \sin(\omega x)f(x)dx` + for user-provided :math:`\omega` and :math:`f`. The procedure of + ``QAWO`` is applied on successive finite intervals, and convergence + acceleration by means of the :math:`\varepsilon`-algorithm is applied + to the series of integral approximations. + qawse + approximate :math:`\int^b_a w(x)f(x)dx`, with :math:`a < b` where + :math:`w(x) = (x-a)^{\alpha}(b-x)^{\beta}v(x)` with + :math:`\alpha,\beta > -1`, where :math:`v(x)` may be one of the + following functions: :math:`1`, :math:`\log(x-a)`, :math:`\log(b-x)`, + :math:`\log(x-a)\log(b-x)`. + + The user specifies :math:`\alpha`, :math:`\beta` and the type of the + function :math:`v`. A globally adaptive subdivision strategy is + applied, with modified Clenshaw-Curtis integration on those + subintervals which contain `a` or `b`. + qawce + compute :math:`\int^b_a f(x) / (x-c)dx` where the integral must be + interpreted as a Cauchy principal value integral, for user specified + :math:`c` and :math:`f`. The strategy is globally adaptive. Modified + Clenshaw-Curtis integration is used on those intervals containing the + point :math:`x = c`. + + References + ---------- + + .. 
[1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + Examples + -------- + Calculate + + .. math:: + + \int^{1}_{-0.15} \int^{0.8}_{0.13} \int^{1}_{-1} \int^{1}_{0} + f(x_0, x_1, x_2, x_3) \,dx_0 \,dx_1 \,dx_2 \,dx_3 , + + where + + .. math:: + + f(x_0, x_1, x_2, x_3) = \begin{cases} + x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+1 & (x_0-0.2 x_3-0.5-0.25 x_1 > 0) \\ + x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+0 & (x_0-0.2 x_3-0.5-0.25 x_1 \leq 0) + \end{cases} . + + >>> import numpy as np + >>> from scipy import integrate + >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + ( + ... 1 if (x0-.2*x3-.5-.25*x1>0) else 0) + >>> def opts0(*args, **kwargs): + ... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]} + >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]], + ... opts=[opts0,{},{},{}], full_output=True) + (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962}) + + Calculate + + .. math:: + + \int^{t_0+t_1+1}_{t_0+t_1-1} + \int^{x_2+t_0^2 t_1^3+1}_{x_2+t_0^2 t_1^3-1} + \int^{t_0 x_1+t_1 x_2+1}_{t_0 x_1+t_1 x_2-1} + f(x_0,x_1, x_2,t_0,t_1) + \,dx_0 \,dx_1 \,dx_2, + + where + + .. math:: + + f(x_0, x_1, x_2, t_0, t_1) = \begin{cases} + x_0 x_2^2 + \sin{x_1}+2 & (x_0+t_1 x_1-t_0 > 0) \\ + x_0 x_2^2 +\sin{x_1}+1 & (x_0+t_1 x_1-t_0 \leq 0) + \end{cases} + + and :math:`(t_0, t_1) = (0, 1)` . + + >>> def func2(x0, x1, x2, t0, t1): + ... return x0*x2**2 + np.sin(x1) + 1 + (1 if x0+t1*x1-t0>0 else 0) + >>> def lim0(x1, x2, t0, t1): + ... return [t0*x1 + t1*x2 - 1, t0*x1 + t1*x2 + 1] + >>> def lim1(x2, t0, t1): + ... return [x2 + t0**2*t1**3 - 1, x2 + t0**2*t1**3 + 1] + >>> def lim2(t0, t1): + ... return [t0 + t1 - 1, t0 + t1 + 1] + >>> def opts0(x1, x2, t0, t1): + ... return {'points' : [t0 - t1*x1]} + >>> def opts1(x2, t0, t1): + ... return {} + >>> def opts2(t0, t1): + ... return {} + >>> integrate.nquad(func2, [lim0, lim1, lim2], args=(0,1), + ... opts=[opts0, opts1, opts2]) + (36.099919226771625, 1.8546948553373528e-07) + + """ + depth = len(ranges) + ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges] + if args is None: + args = () + if opts is None: + opts = [dict([])] * depth + + if isinstance(opts, dict): + opts = [_OptFunc(opts)] * depth + else: + opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts] + return _NQuad(func, ranges, opts, full_output).integrate(*args) + + +class _RangeFunc: + def __init__(self, range_): + self.range_ = range_ + + def __call__(self, *args): + """Return stored value. + + *args needed because range_ can be float or func, and is called with + variable number of parameters. + """ + return self.range_ + + +class _OptFunc: + def __init__(self, opt): + self.opt = opt + + def __call__(self, *args): + """Return stored dict.""" + return self.opt + + +class _NQuad: + def __init__(self, func, ranges, opts, full_output): + self.abserr = 0 + self.func = func + self.ranges = ranges + self.opts = opts + self.maxdepth = len(ranges) + self.full_output = full_output + if self.full_output: + self.out_dict = {'neval': 0} + + def integrate(self, *args, **kwargs): + depth = kwargs.pop('depth', 0) + if kwargs: + raise ValueError('unexpected kwargs') + + # Get the integration range and options for this depth. 
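+        # `ranges` and `opts` are ordered innermost-first (ranges[0] is x0),
+        # while the recursion starts at the outermost integral (depth 0), so
+        # the lookup below indexes from the end: depth 0 -> ranges[-1], and
+        # so on; e.g., with three variables, depth 1 uses ranges[-2].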
+ ind = -(depth + 1) + fn_range = self.ranges[ind] + low, high = fn_range(*args) + fn_opt = self.opts[ind] + opt = dict(fn_opt(*args)) + + if 'points' in opt: + opt['points'] = [x for x in opt['points'] if low <= x <= high] + if depth + 1 == self.maxdepth: + f = self.func + else: + f = partial(self.integrate, depth=depth+1) + quad_r = quad(f, low, high, args=args, full_output=self.full_output, + **opt) + value = quad_r[0] + abserr = quad_r[1] + if self.full_output: + infodict = quad_r[2] + # The 'neval' parameter in full_output returns the total + # number of times the integrand function was evaluated. + # Therefore, only the innermost integration loop counts. + if depth + 1 == self.maxdepth: + self.out_dict['neval'] += infodict['neval'] + self.abserr = max(self.abserr, abserr) + if depth > 0: + return value + else: + # Final result of N-D integration with error + if self.full_output: + return value, self.abserr, self.out_dict + else: + return value, self.abserr diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quadrature.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe4ef9424eb1acb0e488f5600b2d83f8ca99090 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_quadrature.py @@ -0,0 +1,1684 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Callable, Any, cast +import numpy as np +import numpy.typing as npt +import math +import warnings +from collections import namedtuple + +from scipy.special import roots_legendre +from scipy.special import gammaln, logsumexp +from scipy._lib._util import _rng_spawn +from scipy._lib.deprecation import _deprecated + + +__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb', + 'trapezoid', 'simpson', + 'cumulative_trapezoid', 'newton_cotes', + 'qmc_quad', 'AccuracyWarning', 'cumulative_simpson'] + + +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + cumulative_trapezoid, simpson, romb + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. 
Return value will be equal to combined area under + the red lines. + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. [2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + Use the trapezoidal rule on evenly spaced points: + + >>> import numpy as np + >>> from scipy import integrate + >>> integrate.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> integrate.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> integrate.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> integrate.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> integrate.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> integrate.trapezoid(a, axis=1) + array([2., 8.]) + """ + y = np.asanyarray(y) + if x is None: + d = dx + else: + x = np.asanyarray(x) + if x.ndim == 1: + d = np.diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = np.diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = np.add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) + return ret + + +class AccuracyWarning(Warning): + pass + + +if TYPE_CHECKING: + # workaround for mypy function attributes see: + # https://github.com/python/mypy/issues/2087#issuecomment-462726600 + from typing import Protocol + + class CacheAttributes(Protocol): + cache: dict[int, tuple[Any, Any]] +else: + CacheAttributes = Callable + + +def cache_decorator(func: Callable) -> CacheAttributes: + return cast(CacheAttributes, func) + + +@cache_decorator +def _cached_roots_legendre(n): + """ + Cache roots_legendre results to speed up calls of the fixed_quad + function. + """ + if n in _cached_roots_legendre.cache: + return _cached_roots_legendre.cache[n] + + _cached_roots_legendre.cache[n] = roots_legendre(n) + return _cached_roots_legendre.cache[n] + + +_cached_roots_legendre.cache = dict() + + +def fixed_quad(func, a, b, args=(), n=5): + """ + Compute a definite integral using fixed-order Gaussian quadrature. + + Integrate `func` from `a` to `b` using Gaussian quadrature of + order `n`. + + Parameters + ---------- + func : callable + A Python function or method to integrate (must accept vector inputs). + If integrating a vector-valued function, the returned array must have + shape ``(..., len(x))``. + a : float + Lower limit of integration. + b : float + Upper limit of integration. 
+ args : tuple, optional + Extra arguments to pass to function, if any. + n : int, optional + Order of quadrature integration. Default is 5. + + Returns + ------- + val : float + Gaussian quadrature approximation to the integral + none : None + Statically returned value of None + + See Also + -------- + quad : adaptive quadrature using QUADPACK + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + simpson : integrators for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> f = lambda x: x**8 + >>> integrate.fixed_quad(f, 0.0, 1.0, n=4) + (0.1110884353741496, None) + >>> integrate.fixed_quad(f, 0.0, 1.0, n=5) + (0.11111111111111102, None) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4) + (0.9999999771971152, None) + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5) + (1.000000000039565, None) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + x, w = _cached_roots_legendre(n) + x = np.real(x) + if np.isinf(a) or np.isinf(b): + raise ValueError("Gaussian quadrature is only available for " + "finite limits.") + y = (b-a)*(x+1)/2.0 + a + return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None + + +def vectorize1(func, args=(), vec_func=False): + """Vectorize the call to a function. + + This is an internal utility function used by `romberg` and + `quadrature` to create a vectorized version of a function. + + If `vec_func` is True, the function `func` is assumed to take vector + arguments. + + Parameters + ---------- + func : callable + User defined function. + args : tuple, optional + Extra arguments for the function. + vec_func : bool, optional + True if the function func takes vector arguments. + + Returns + ------- + vfunc : callable + A function that will take a vector argument and return the + result. + + """ + if vec_func: + def vfunc(x): + return func(x, *args) + else: + def vfunc(x): + if np.isscalar(x): + return func(x, *args) + x = np.asarray(x) + # call with first point to get output type + y0 = func(x[0], *args) + n = len(x) + dtype = getattr(y0, 'dtype', type(y0)) + output = np.empty((n,), dtype=dtype) + output[0] = y0 + for i in range(1, n): + output[i] = func(x[i], *args) + return output + return vfunc + + +@_deprecated("`scipy.integrate.quadrature` is deprecated as of SciPy 1.12.0" + "and will be removed in SciPy 1.15.0. Please use" + "`scipy.integrate.quad` instead.") +def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50, + vec_func=True, miniter=1): + """ + Compute a definite integral using fixed-tolerance Gaussian quadrature. + + .. deprecated:: 1.12.0 + + This function is deprecated as of SciPy 1.12.0 and will be removed + in SciPy 1.15.0. Please use `scipy.integrate.quad` instead. + + Integrate `func` from `a` to `b` using Gaussian quadrature + with absolute tolerance `tol`. + + Parameters + ---------- + func : function + A Python function or method to integrate. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + args : tuple, optional + Extra arguments to pass to function. + tol, rtol : float, optional + Iteration stops when error between last two iterates is less than + `tol` OR the relative change is less than `rtol`. + maxiter : int, optional + Maximum order of Gaussian quadrature. 
+ vec_func : bool, optional + True or False if func handles arrays as arguments (is + a "vector" function). Default is True. + miniter : int, optional + Minimum order of Gaussian quadrature. + + Returns + ------- + val : float + Gaussian quadrature approximation (within tolerance) to integral. + err : float + Difference between last two estimates of the integral. + + See Also + -------- + fixed_quad : fixed-order Gaussian quadrature + quad : adaptive quadrature using QUADPACK + dblquad : double integrals + tplquad : triple integrals + romb : integrator for sampled data + simpson : integrator for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> f = lambda x: x**8 + >>> integrate.quadrature(f, 0.0, 1.0) + (0.11111111111111106, 4.163336342344337e-17) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.quadrature(np.cos, 0.0, np.pi/2) + (0.9999999999999536, 3.9611425250996035e-11) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + if not isinstance(args, tuple): + args = (args,) + vfunc = vectorize1(func, args, vec_func=vec_func) + val = np.inf + err = np.inf + maxiter = max(miniter+1, maxiter) + for n in range(miniter, maxiter+1): + newval = fixed_quad(vfunc, a, b, (), n)[0] + err = abs(newval-val) + val = newval + + if err < tol or err < rtol*abs(val): + break + else: + warnings.warn( + "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err), + AccuracyWarning, stacklevel=2 + ) + return val, err + + +def tupleset(t, i, value): + l = list(t) + l[i] = value + return tuple(l) + + +def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None): + """ + Cumulatively integrate y(x) using the composite trapezoidal rule. + + Parameters + ---------- + y : array_like + Values to integrate. + x : array_like, optional + The coordinate to integrate along. If None (default), use spacing `dx` + between consecutive elements in `y`. + dx : float, optional + Spacing between elements of `y`. Only used if `x` is None. + axis : int, optional + Specifies the axis to cumulate. Default is -1 (last axis). + initial : scalar, optional + If given, insert this value at the beginning of the returned result. + 0 or None are the only values accepted. Default is None, which means + `res` has one element less than `y` along the axis of integration. + + .. deprecated:: 1.12.0 + The option for non-zero inputs for `initial` will be deprecated in + SciPy 1.15.0. After this time, a ValueError will be raised if + `initial` is not None or 0. + + Returns + ------- + res : ndarray + The result of cumulative integration of `y` along `axis`. + If `initial` is None, the shape is such that the axis of integration + has one less value than `y`. If `initial` is given, the shape is equal + to that of `y`. 
+ + See Also + -------- + numpy.cumsum, numpy.cumprod + cumulative_simpson : cumulative integration using Simpson's 1/3 rule + quad : adaptive quadrature using QUADPACK + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-2, 2, num=20) + >>> y = x + >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0) + >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-') + >>> plt.show() + + """ + y = np.asarray(y) + if y.shape[axis] == 0: + raise ValueError("At least one point is required along `axis`.") + if x is None: + d = dx + else: + x = np.asarray(x) + if x.ndim == 1: + d = np.diff(x) + # reshape to correct shape + shape = [1] * y.ndim + shape[axis] = -1 + d = d.reshape(shape) + elif len(x.shape) != len(y.shape): + raise ValueError("If given, shape of x must be 1-D or the " + "same as y.") + else: + d = np.diff(x, axis=axis) + + if d.shape[axis] != y.shape[axis] - 1: + raise ValueError("If given, length of x along axis must be the " + "same as y.") + + nd = len(y.shape) + slice1 = tupleset((slice(None),)*nd, axis, slice(1, None)) + slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1)) + res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis) + + if initial is not None: + if initial != 0: + warnings.warn( + "The option for values for `initial` other than None or 0 is " + "deprecated as of SciPy 1.12.0 and will raise a value error in" + " SciPy 1.15.0.", + DeprecationWarning, stacklevel=2 + ) + if not np.isscalar(initial): + raise ValueError("`initial` parameter should be a scalar.") + + shape = list(res.shape) + shape[axis] = 1 + res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res], + axis=axis) + + return res + + +def _basic_simpson(y, start, stop, x, dx, axis): + nd = len(y.shape) + if start is None: + start = 0 + step = 2 + slice_all = (slice(None),)*nd + slice0 = tupleset(slice_all, axis, slice(start, stop, step)) + slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) + slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step)) + + if x is None: # Even-spaced Simpson's rule. + result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis) + result *= dx / 3.0 + else: + # Account for possibly different spacings. + # Simpson's rule changes a bit. + h = np.diff(x, axis=axis) + sl0 = tupleset(slice_all, axis, slice(start, stop, step)) + sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) + h0 = h[sl0].astype(float, copy=False) + h1 = h[sl1].astype(float, copy=False) + hsum = h0 + h1 + hprod = h0 * h1 + h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0) + tmp = hsum/6.0 * (y[slice0] * + (2.0 - np.true_divide(1.0, h0divh1, + out=np.zeros_like(h0divh1), + where=h0divh1 != 0)) + + y[slice1] * (hsum * + np.true_divide(hsum, hprod, + out=np.zeros_like(hsum), + where=hprod != 0)) + + y[slice2] * (2.0 - h0divh1)) + result = np.sum(tmp, axis=axis) + return result + + +def simpson(y, *, x=None, dx=1.0, axis=-1): + """ + Integrate y(x) using samples along the given axis and the composite + Simpson's rule. If x is None, spacing of dx is assumed. + + If there are an even number of samples, N, then there are an odd + number of intervals (N-1), but Simpson's rule requires an even number + of intervals. The parameter 'even' controls how this is handled. 
+ + Parameters + ---------- + y : array_like + Array to be integrated. + x : array_like, optional + If given, the points at which `y` is sampled. + dx : float, optional + Spacing of integration points along axis of `x`. Only used when + `x` is None. Default is 1. + axis : int, optional + Axis along which to integrate. Default is the last axis. + + Returns + ------- + float + The estimated integral computed with the composite Simpson's rule. + + See Also + -------- + quad : adaptive quadrature using QUADPACK + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + cumulative_trapezoid : cumulative integration for sampled data + cumulative_simpson : cumulative integration using Simpson's 1/3 rule + + Notes + ----- + For an odd number of samples that are equally spaced the result is + exact if the function is a polynomial of order 3 or less. If + the samples are not equally spaced, then the result is exact only + if the function is a polynomial of order 2 or less. + + References + ---------- + .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with + MS Excel and Irregularly-spaced Data. Journal of Mathematical + Sciences and Mathematics Education. 12 (2): 1-9 + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> x = np.arange(0, 10) + >>> y = np.arange(0, 10) + + >>> integrate.simpson(y, x=x) + 40.5 + + >>> y = np.power(x, 3) + >>> integrate.simpson(y, x=x) + 1640.5 + >>> integrate.quad(lambda x: x**3, 0, 9)[0] + 1640.25 + + """ + y = np.asarray(y) + nd = len(y.shape) + N = y.shape[axis] + last_dx = dx + returnshape = 0 + if x is not None: + x = np.asarray(x) + if len(x.shape) == 1: + shapex = [1] * nd + shapex[axis] = x.shape[0] + saveshape = x.shape + returnshape = 1 + x = x.reshape(tuple(shapex)) + elif len(x.shape) != len(y.shape): + raise ValueError("If given, shape of x must be 1-D or the " + "same as y.") + if x.shape[axis] != N: + raise ValueError("If given, length of x along axis must be the " + "same as y.") + + if N % 2 == 0: + val = 0.0 + result = 0.0 + slice_all = (slice(None),) * nd + + if N == 2: + # need at least 3 points in integration axis to form parabolic + # segment. If there are two points then any of 'avg', 'first', + # 'last' should give the same result. + slice1 = tupleset(slice_all, axis, -1) + slice2 = tupleset(slice_all, axis, -2) + if x is not None: + last_dx = x[slice1] - x[slice2] + val += 0.5 * last_dx * (y[slice1] + y[slice2]) + else: + # use Simpson's rule on first intervals + result = _basic_simpson(y, 0, N-3, x, dx, axis) + + slice1 = tupleset(slice_all, axis, -1) + slice2 = tupleset(slice_all, axis, -2) + slice3 = tupleset(slice_all, axis, -3) + + h = np.asarray([dx, dx], dtype=np.float64) + if x is not None: + # grab the last two spacings from the appropriate axis + hm2 = tupleset(slice_all, axis, slice(-2, -1, 1)) + hm1 = tupleset(slice_all, axis, slice(-1, None, 1)) + + diffs = np.float64(np.diff(x, axis=axis)) + h = [np.squeeze(diffs[hm2], axis=axis), + np.squeeze(diffs[hm1], axis=axis)] + + # This is the correction for the last interval according to + # Cartwright. + # However, I used the equations given at + # https://en.wikipedia.org/wiki/Simpson%27s_rule#Composite_Simpson's_rule_for_irregularly_spaced_data + # A footnote on Wikipedia says: + # Cartwright 2017, Equation 8. 
The equation in Cartwright is + # calculating the first interval whereas the equations in the + # Wikipedia article are adjusting for the last integral. If the + # proper algebraic substitutions are made, the equation results in + # the values shown. + num = 2 * h[1] ** 2 + 3 * h[0] * h[1] + den = 6 * (h[1] + h[0]) + alpha = np.true_divide( + num, + den, + out=np.zeros_like(den), + where=den != 0 + ) + + num = h[1] ** 2 + 3.0 * h[0] * h[1] + den = 6 * h[0] + beta = np.true_divide( + num, + den, + out=np.zeros_like(den), + where=den != 0 + ) + + num = 1 * h[1] ** 3 + den = 6 * h[0] * (h[0] + h[1]) + eta = np.true_divide( + num, + den, + out=np.zeros_like(den), + where=den != 0 + ) + + result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3] + + result += val + else: + result = _basic_simpson(y, 0, N-2, x, dx, axis) + if returnshape: + x = x.reshape(saveshape) + return result + + +def _cumulatively_sum_simpson_integrals( + y: np.ndarray, + dx: np.ndarray, + integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray], +) -> np.ndarray: + """Calculate cumulative sum of Simpson integrals. + Takes as input the integration function to be used. + The integration_func is assumed to return the cumulative sum using + composite Simpson's rule. Assumes the axis of summation is -1. + """ + sub_integrals_h1 = integration_func(y, dx) + sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1] + + shape = list(sub_integrals_h1.shape) + shape[-1] += 1 + sub_integrals = np.empty(shape) + sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2] + sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2] + # Integral over last subinterval can only be calculated from + # formula for h2 + sub_integrals[..., -1] = sub_integrals_h2[..., -1] + res = np.cumsum(sub_integrals, axis=-1) + return res + + +def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray: + """Calculate the Simpson integrals for all h1 intervals assuming equal interval + widths. The function can also be used to calculate the integral for all + h2 intervals by reversing the inputs, `y` and `dx`. + """ + d = dx[..., :-1] + f1 = y[..., :-2] + f2 = y[..., 1:-1] + f3 = y[..., 2:] + + # Calculate integral over the subintervals (eqn (10) of Reference [2]) + return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4) + + +def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray: + """Calculate the Simpson integrals for all h1 intervals assuming unequal interval + widths. The function can also be used to calculate the integral for all + h2 intervals by reversing the inputs, `y` and `dx`. + """ + x21 = dx[..., :-1] + x32 = dx[..., 1:] + f1 = y[..., :-2] + f2 = y[..., 1:-1] + f3 = y[..., 2:] + + x31 = x21 + x32 + x21_x31 = x21/x31 + x21_x32 = x21/x32 + x21x21_x31x32 = x21_x31 * x21_x32 + + # Calculate integral over the subintervals (eqn (8) of Reference [2]) + coeff1 = 3 - x21_x31 + coeff2 = 3 + x21x21_x31x32 + x21_x31 + coeff3 = -x21x21_x31x32 + + return x21/6 * (coeff1*f1 + coeff2*f2 + coeff3*f3) + + +def _ensure_float_array(arr: npt.ArrayLike) -> np.ndarray: + arr = np.asarray(arr) + if np.issubdtype(arr.dtype, np.integer): + arr = arr.astype(float, copy=False) + return arr + + +def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None): + r""" + Cumulatively integrate y(x) using the composite Simpson's 1/3 rule. + The integral of the samples at every point is calculated by assuming a + quadratic relationship between each point and the two adjacent points. 
+ + Parameters + ---------- + y : array_like + Values to integrate. Requires at least one point along `axis`. If two or fewer + points are provided along `axis`, Simpson's integration is not possible and the + result is calculated with `cumulative_trapezoid`. + x : array_like, optional + The coordinate to integrate along. Must have the same shape as `y` or + must be 1D with the same length as `y` along `axis`. `x` must also be + strictly increasing along `axis`. + If `x` is None (default), integration is performed using spacing `dx` + between consecutive elements in `y`. + dx : scalar or array_like, optional + Spacing between elements of `y`. Only used if `x` is None. Can either + be a float, or an array with the same shape as `y`, but of length one along + `axis`. Default is 1.0. + axis : int, optional + Specifies the axis to integrate along. Default is -1 (last axis). + initial : scalar or array_like, optional + If given, insert this value at the beginning of the returned result, + and add it to the rest of the result. Default is None, which means no + value at ``x[0]`` is returned and `res` has one element less than `y` + along the axis of integration. Can either be a float, or an array with + the same shape as `y`, but of length one along `axis`. + + Returns + ------- + res : ndarray + The result of cumulative integration of `y` along `axis`. + If `initial` is None, the shape is such that the axis of integration + has one less value than `y`. If `initial` is given, the shape is equal + to that of `y`. + + See Also + -------- + numpy.cumsum + cumulative_trapezoid : cumulative integration using the composite + trapezoidal rule + simpson : integrator for sampled data using the Composite Simpson's Rule + + Notes + ----- + + .. versionadded:: 1.12.0 + + The composite Simpson's 1/3 method can be used to approximate the definite + integral of a sampled input function :math:`y(x)` [1]_. The method assumes + a quadratic relationship over the interval containing any three consecutive + sampled points. + + Consider three consecutive points: + :math:`(x_1, y_1), (x_2, y_2), (x_3, y_3)`. + + Assuming a quadratic relationship over the three points, the integral over + the subinterval between :math:`x_1` and :math:`x_2` is given by formula + (8) of [2]_: + + .. math:: + \int_{x_1}^{x_2} y(x) dx\ &= \frac{x_2-x_1}{6}\left[\ + \left\{3-\frac{x_2-x_1}{x_3-x_1}\right\} y_1 + \ + \left\{3 + \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} + \ + \frac{x_2-x_1}{x_3-x_1}\right\} y_2\\ + - \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} y_3\right] + + The integral between :math:`x_2` and :math:`x_3` is given by swapping + appearances of :math:`x_1` and :math:`x_3`. The integral is estimated + separately for each subinterval and then cumulatively summed to obtain + the final result. + + For samples that are equally spaced, the result is exact if the function + is a polynomial of order three or less [1]_ and the number of subintervals + is even. Otherwise, the integral is exact for polynomials of order two or + less. + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Simpson's_rule + .. [2] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with + MS Excel and Irregularly-spaced Data. Journal of Mathematical + Sciences and Mathematics Education. 
12 (2): 1-9 + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-2, 2, num=20) + >>> y = x**2 + >>> y_int = integrate.cumulative_simpson(y, x=x, initial=0) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, y_int, 'ro', x, x**3/3 - (x[0])**3/3, 'b-') + >>> ax.grid() + >>> plt.show() + + The output of `cumulative_simpson` is similar to that of iteratively + calling `simpson` with successively higher upper limits of integration, but + not identical. + + >>> def cumulative_simpson_reference(y, x): + ... return np.asarray([integrate.simpson(y[:i], x=x[:i]) + ... for i in range(2, len(y) + 1)]) + >>> + >>> rng = np.random.default_rng(354673834679465) + >>> x, y = rng.random(size=(2, 10)) + >>> x.sort() + >>> + >>> res = integrate.cumulative_simpson(y, x=x) + >>> ref = cumulative_simpson_reference(y, x) + >>> equal = np.abs(res - ref) < 1e-15 + >>> equal # not equal when `simpson` has even number of subintervals + array([False, True, False, True, False, True, False, True, True]) + + This is expected: because `cumulative_simpson` has access to more + information than `simpson`, it can typically produce more accurate + estimates of the underlying integral over subintervals. + + """ + y = _ensure_float_array(y) + + # validate `axis` and standardize to work along the last axis + original_y = y + original_shape = y.shape + try: + y = np.swapaxes(y, axis, -1) + except IndexError as e: + message = f"`axis={axis}` is not valid for `y` with `y.ndim={y.ndim}`." + raise ValueError(message) from e + if y.shape[-1] < 3: + res = cumulative_trapezoid(original_y, x, dx=dx, axis=axis, initial=None) + res = np.swapaxes(res, axis, -1) + + elif x is not None: + x = _ensure_float_array(x) + message = ("If given, shape of `x` must be the same as `y` or 1-D with " + "the same length as `y` along `axis`.") + if not (x.shape == original_shape + or (x.ndim == 1 and len(x) == original_shape[axis])): + raise ValueError(message) + + x = np.broadcast_to(x, y.shape) if x.ndim == 1 else np.swapaxes(x, axis, -1) + dx = np.diff(x, axis=-1) + if np.any(dx <= 0): + raise ValueError("Input x must be strictly increasing.") + res = _cumulatively_sum_simpson_integrals( + y, dx, _cumulative_simpson_unequal_intervals + ) + + else: + dx = _ensure_float_array(dx) + final_dx_shape = tupleset(original_shape, axis, original_shape[axis] - 1) + alt_input_dx_shape = tupleset(original_shape, axis, 1) + message = ("If provided, `dx` must either be a scalar or have the same " + "shape as `y` but with only 1 point along `axis`.") + if not (dx.ndim == 0 or dx.shape == alt_input_dx_shape): + raise ValueError(message) + dx = np.broadcast_to(dx, final_dx_shape) + dx = np.swapaxes(dx, axis, -1) + res = _cumulatively_sum_simpson_integrals( + y, dx, _cumulative_simpson_equal_intervals + ) + + if initial is not None: + initial = _ensure_float_array(initial) + alt_initial_input_shape = tupleset(original_shape, axis, 1) + message = ("If provided, `initial` must either be a scalar or have the " + "same shape as `y` but with only 1 point along `axis`.") + if not (initial.ndim == 0 or initial.shape == alt_initial_input_shape): + raise ValueError(message) + initial = np.broadcast_to(initial, alt_initial_input_shape) + initial = np.swapaxes(initial, axis, -1) + + res += initial + res = np.concatenate((initial, res), axis=-1) + + res = np.swapaxes(res, -1, axis) + return res + + +def romb(y, dx=1.0, axis=-1, show=False): + """ + Romberg integration using samples of a function. 
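+
+    The samples are combined by Richardson extrapolation of successively
+    refined trapezoidal estimates, which is why the number of samples must
+    be ``2**k + 1`` for some non-negative integer ``k``.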
+ + Parameters + ---------- + y : array_like + A vector of ``2**k + 1`` equally-spaced samples of a function. + dx : float, optional + The sample spacing. Default is 1. + axis : int, optional + The axis along which to integrate. Default is -1 (last axis). + show : bool, optional + When `y` is a single 1-D array, then if this argument is True + print the table showing Richardson extrapolation from the + samples. Default is False. + + Returns + ------- + romb : ndarray + The integrated result for `axis`. + + See Also + -------- + quad : adaptive quadrature using QUADPACK + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + simpson : integrators for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> x = np.arange(10, 14.25, 0.25) + >>> y = np.arange(3, 12) + + >>> integrate.romb(y) + 56.0 + + >>> y = np.sin(np.power(x, 2.5)) + >>> integrate.romb(y) + -0.742561336672229 + + >>> integrate.romb(y, show=True) + Richardson Extrapolation Table for Romberg Integration + ====================================================== + -0.81576 + 4.63862 6.45674 + -1.10581 -3.02062 -3.65245 + -2.57379 -3.06311 -3.06595 -3.05664 + -1.34093 -0.92997 -0.78776 -0.75160 -0.74256 + ====================================================== + -0.742561336672229 # may vary + + """ + y = np.asarray(y) + nd = len(y.shape) + Nsamps = y.shape[axis] + Ninterv = Nsamps-1 + n = 1 + k = 0 + while n < Ninterv: + n <<= 1 + k += 1 + if n != Ninterv: + raise ValueError("Number of samples must be one plus a " + "non-negative power of 2.") + + R = {} + slice_all = (slice(None),) * nd + slice0 = tupleset(slice_all, axis, 0) + slicem1 = tupleset(slice_all, axis, -1) + h = Ninterv * np.asarray(dx, dtype=float) + R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h + slice_R = slice_all + start = stop = step = Ninterv + for i in range(1, k+1): + start >>= 1 + slice_R = tupleset(slice_R, axis, slice(start, stop, step)) + step >>= 1 + R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis)) + for j in range(1, i+1): + prev = R[(i, j-1)] + R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1) + h /= 2.0 + + if show: + if not np.isscalar(R[(0, 0)]): + print("*** Printing table only supported for integrals" + + " of a single data set.") + else: + try: + precis = show[0] + except (TypeError, IndexError): + precis = 5 + try: + width = show[1] + except (TypeError, IndexError): + width = 8 + formstr = "%%%d.%df" % (width, precis) + + title = "Richardson Extrapolation Table for Romberg Integration" + print(title, "=" * len(title), sep="\n", end="\n") + for i in range(k+1): + for j in range(i+1): + print(formstr % R[(i, j)], end=" ") + print() + print("=" * len(title)) + + return R[(k, k)] + +# Romberg quadratures for numeric integration. +# +# Written by Scott M. Ransom +# last revision: 14 Nov 98 +# +# Cosmetic changes by Konrad Hinsen +# last revision: 1999-7-21 +# +# Adapted to SciPy by Travis Oliphant +# last revision: Dec 2001 + + +def _difftrap(function, interval, numtraps): + """ + Perform part of the trapezoidal rule to integrate a function. + Assume that we had called difftrap with all lower powers-of-2 + starting with 1. Calling difftrap only returns the summation + of the new ordinates. It does _not_ multiply by the width + of the trapezoids. This must be performed by the caller. + 'function' is the function to evaluate (must accept vector arguments). 
+ 'interval' is a sequence with lower and upper limits + of integration. + 'numtraps' is the number of trapezoids to use (must be a + power-of-2). + """ + if numtraps <= 0: + raise ValueError("numtraps must be > 0 in difftrap().") + elif numtraps == 1: + return 0.5*(function(interval[0])+function(interval[1])) + else: + numtosum = numtraps/2 + h = float(interval[1]-interval[0])/numtosum + lox = interval[0] + 0.5 * h + points = lox + h * np.arange(numtosum) + s = np.sum(function(points), axis=0) + return s + + +def _romberg_diff(b, c, k): + """ + Compute the differences for the Romberg quadrature corrections. + See Forman Acton's "Real Computing Made Real," p 143. + """ + tmp = 4.0**k + return (tmp * c - b)/(tmp - 1.0) + + +def _printresmat(function, interval, resmat): + # Print the Romberg result matrix. + i = j = 0 + print('Romberg integration of', repr(function), end=' ') + print('from', interval) + print('') + print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results')) + for i in range(len(resmat)): + print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ') + for j in range(i+1): + print('%9f' % (resmat[i][j]), end=' ') + print('') + print('') + print('The final result is', resmat[i][j], end=' ') + print('after', 2**(len(resmat)-1)+1, 'function evaluations.') + + +@_deprecated("`scipy.integrate.romberg` is deprecated as of SciPy 1.12.0" + "and will be removed in SciPy 1.15.0. Please use" + "`scipy.integrate.quad` instead.") +def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False, + divmax=10, vec_func=False): + """ + Romberg integration of a callable function or method. + + .. deprecated:: 1.12.0 + + This function is deprecated as of SciPy 1.12.0 and will be removed + in SciPy 1.15.0. Please use `scipy.integrate.quad` instead. + + Returns the integral of `function` (a function of one variable) + over the interval (`a`, `b`). + + If `show` is 1, the triangular array of the intermediate results + will be printed. If `vec_func` is True (default is False), then + `function` is assumed to support vector arguments. + + Parameters + ---------- + function : callable + Function to be integrated. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + + Returns + ------- + results : float + Result of the integration. + + Other Parameters + ---------------- + args : tuple, optional + Extra arguments to pass to function. Each element of `args` will + be passed as a single argument to `func`. Default is to pass no + extra arguments. + tol, rtol : float, optional + The desired absolute and relative tolerances. Defaults are 1.48e-8. + show : bool, optional + Whether to print the results. Default is False. + divmax : int, optional + Maximum order of extrapolation. Default is 10. + vec_func : bool, optional + Whether `func` handles arrays as arguments (i.e., whether it is a + "vector" function). Default is False. + + See Also + -------- + fixed_quad : Fixed-order Gaussian quadrature. + quad : Adaptive quadrature using QUADPACK. + dblquad : Double integrals. + tplquad : Triple integrals. + romb : Integrators for sampled data. + simpson : Integrators for sampled data. + cumulative_trapezoid : Cumulative integration for sampled data. + + References + ---------- + .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method + + Examples + -------- + Integrate a gaussian from 0 to 1 and compare to the error function. 
+ + >>> from scipy import integrate + >>> from scipy.special import erf + >>> import numpy as np + >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2) + >>> result = integrate.romberg(gaussian, 0, 1, show=True) + Romberg integration of from [0, 1] + + :: + + Steps StepSize Results + 1 1.000000 0.385872 + 2 0.500000 0.412631 0.421551 + 4 0.250000 0.419184 0.421368 0.421356 + 8 0.125000 0.420810 0.421352 0.421350 0.421350 + 16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350 + 32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350 + + The final result is 0.421350396475 after 33 function evaluations. + + >>> print("%g %g" % (2*result, erf(1))) + 0.842701 0.842701 + + """ + if np.isinf(a) or np.isinf(b): + raise ValueError("Romberg integration only available " + "for finite limits.") + vfunc = vectorize1(function, args, vec_func=vec_func) + n = 1 + interval = [a, b] + intrange = b - a + ordsum = _difftrap(vfunc, interval, n) + result = intrange * ordsum + resmat = [[result]] + err = np.inf + last_row = resmat[0] + for i in range(1, divmax+1): + n *= 2 + ordsum += _difftrap(vfunc, interval, n) + row = [intrange * ordsum / n] + for k in range(i): + row.append(_romberg_diff(last_row[k], row[k], k+1)) + result = row[i] + lastresult = last_row[i-1] + if show: + resmat.append(row) + err = abs(result - lastresult) + if err < tol or err < rtol * abs(result): + break + last_row = row + else: + warnings.warn( + "divmax (%d) exceeded. Latest difference = %e" % (divmax, err), + AccuracyWarning, stacklevel=2) + + if show: + _printresmat(vfunc, interval, resmat) + return result + + +# Coefficients for Newton-Cotes quadrature +# +# These are the points being used +# to construct the local interpolating polynomial +# a are the weights for Newton-Cotes integration +# B is the error coefficient. +# error in these coefficients grows as N gets larger. +# or as samples are closer and closer together + +# You can use maxima to find these rational coefficients +# for equally spaced data using the commands +# a(i,N) := (integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) +# / ((N-i)! * i!) * (-1)^(N-i)); +# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N)); +# Bo(N) := N^(N+1)/(N+1)! 
* (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N)); +# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N)); +# +# pre-computed for equally-spaced weights +# +# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N] +# +# a = num_a*array(int_a)/den_a +# B = num_B*1.0 / den_B +# +# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*) +# where k = N // 2 +# +_builtincoeffs = { + 1: (1,2,[1,1],-1,12), + 2: (1,3,[1,4,1],-1,90), + 3: (3,8,[1,3,3,1],-3,80), + 4: (2,45,[7,32,12,32,7],-8,945), + 5: (5,288,[19,75,50,50,75,19],-275,12096), + 6: (1,140,[41,216,27,272,27,216,41],-9,1400), + 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400), + 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989], + -2368,467775), + 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080, + 15741,2857], -4671, 394240), + 10: (5,299376,[16067,106300,-48525,272400,-260550,427368, + -260550,272400,-48525,106300,16067], + -673175, 163459296), + 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542, + 15493566,15493566,-9595542,25226685,-3237113, + 13486539,2171465], -2224234463, 237758976000), + 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295, + 87516288,-87797136,87516288,-51491295,35725120, + -7587864,9903168,1364651], -3012, 875875), + 13: (13, 402361344000,[8181904909, 56280729661, -31268252574, + 156074417954,-151659573325,206683437987, + -43111992612,-43111992612,206683437987, + -151659573325,156074417954,-31268252574, + 56280729661,8181904909], -2639651053, + 344881152000), + 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784, + -6625093363,12630121616,-16802270373,19534438464, + -16802270373,12630121616,-6625093363,3501442784, + -770720657,710986864,90241897], -3740727473, + 1275983280000) + } + + +def newton_cotes(rn, equal=0): + r""" + Return weights and error coefficient for Newton-Cotes integration. + + Suppose we have (N+1) samples of f at the positions + x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the + integral between x_0 and x_N is: + + :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i) + + B_N (\Delta x)^{N+2} f^{N+1} (\xi)` + + where :math:`\xi \in [x_0,x_N]` + and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing. + + If the samples are equally-spaced and N is even, then the error + term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`. + + Parameters + ---------- + rn : int + The integer order for equally-spaced data or the relative positions of + the samples with the first sample at 0 and the last at N, where N+1 is + the length of `rn`. N is the order of the Newton-Cotes integration. + equal : int, optional + Set to 1 to enforce equally spaced data. + + Returns + ------- + an : ndarray + 1-D array of weights to apply to the function at the provided sample + positions. + B : float + Error coefficient. + + Notes + ----- + Normally, the Newton-Cotes rules are used on smaller integration + regions and a composite rule is used to return the total integral. + + Examples + -------- + Compute the integral of sin(x) in [0, :math:`\pi`]: + + >>> from scipy.integrate import newton_cotes + >>> import numpy as np + >>> def f(x): + ... return np.sin(x) + >>> a = 0 + >>> b = np.pi + >>> exact = 2 + >>> for N in [2, 4, 6, 8, 10]: + ... x = np.linspace(a, b, N + 1) + ... an, B = newton_cotes(N, 1) + ... dx = (b - a) / N + ... quad = dx * np.sum(an * f(x)) + ... error = abs(quad - exact) + ... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error)) + ... 
+ 2 2.094395102 9.43951e-02 + 4 1.998570732 1.42927e-03 + 6 2.000017814 1.78136e-05 + 8 1.999999835 1.64725e-07 + 10 2.000000001 1.14677e-09 + + """ + try: + N = len(rn)-1 + if equal: + rn = np.arange(N+1) + elif np.all(np.diff(rn) == 1): + equal = 1 + except Exception: + N = rn + rn = np.arange(N+1) + equal = 1 + + if equal and N in _builtincoeffs: + na, da, vi, nb, db = _builtincoeffs[N] + an = na * np.array(vi, dtype=float) / da + return an, float(nb)/db + + if (rn[0] != 0) or (rn[-1] != N): + raise ValueError("The sample positions must start at 0" + " and end at N") + yi = rn / float(N) + ti = 2 * yi - 1 + nvec = np.arange(N+1) + C = ti ** nvec[:, np.newaxis] + Cinv = np.linalg.inv(C) + # improve precision of result + for i in range(2): + Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv) + vec = 2.0 / (nvec[::2]+1) + ai = Cinv[:, ::2].dot(vec) * (N / 2.) + + if (N % 2 == 0) and equal: + BN = N/(N+3.) + power = N+2 + else: + BN = N/(N+2.) + power = N+1 + + BN = BN - np.dot(yi**power, ai) + p1 = power+1 + fac = power*math.log(N) - gammaln(p1) + fac = math.exp(fac) + return ai, BN*fac + + +def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log): + + # lazy import to avoid issues with partially-initialized submodule + if not hasattr(qmc_quad, 'stats'): + from scipy import stats + qmc_quad.stats = stats + else: + stats = qmc_quad.stats + + if not callable(func): + message = "`func` must be callable." + raise TypeError(message) + + # a, b will be modified, so copy. Oh well if it's copied twice. + a = np.atleast_1d(a).copy() + b = np.atleast_1d(b).copy() + a, b = np.broadcast_arrays(a, b) + dim = a.shape[0] + + try: + func((a + b) / 2) + except Exception as e: + message = ("`func` must evaluate the integrand at points within " + "the integration range; e.g. `func( (a + b) / 2)` " + "must return the integrand at the centroid of the " + "integration volume.") + raise ValueError(message) from e + + try: + func(np.array([a, b]).T) + vfunc = func + except Exception as e: + message = ("Exception encountered when attempting vectorized call to " + f"`func`: {e}. For better performance, `func` should " + "accept two-dimensional array `x` with shape `(len(a), " + "n_points)` and return an array of the integrand value at " + "each of the `n_points`.") + warnings.warn(message, stacklevel=3) + + def vfunc(x): + return np.apply_along_axis(func, axis=-1, arr=x) + + n_points_int = np.int64(n_points) + if n_points != n_points_int: + message = "`n_points` must be an integer." + raise TypeError(message) + + n_estimates_int = np.int64(n_estimates) + if n_estimates != n_estimates_int: + message = "`n_estimates` must be an integer." + raise TypeError(message) + + if qrng is None: + qrng = stats.qmc.Halton(dim) + elif not isinstance(qrng, stats.qmc.QMCEngine): + message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine." + raise TypeError(message) + + if qrng.d != a.shape[0]: + message = ("`qrng` must be initialized with dimensionality equal to " + "the number of variables in `a`, i.e., " + "`qrng.random().shape[-1]` must equal `a.shape[0]`.") + raise ValueError(message) + + rng_seed = getattr(qrng, 'rng_seed', None) + rng = stats._qmc.check_random_state(rng_seed) + + if log not in {True, False}: + message = "`log` must be boolean (`True` or `False`)." 
+ raise TypeError(message) + + return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats) + + +QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error']) + + +def qmc_quad(func, a, b, *, n_estimates=8, n_points=1024, qrng=None, + log=False): + """ + Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature. + + Parameters + ---------- + func : callable + The integrand. Must accept a single argument ``x``, an array which + specifies the point(s) at which to evaluate the scalar-valued + integrand, and return the value(s) of the integrand. + For efficiency, the function should be vectorized to accept an array of + shape ``(d, n_points)``, where ``d`` is the number of variables (i.e. + the dimensionality of the function domain) and `n_points` is the number + of quadrature points, and return an array of shape ``(n_points,)``, + the integrand at each quadrature point. + a, b : array-like + One-dimensional arrays specifying the lower and upper integration + limits, respectively, of each of the ``d`` variables. + n_estimates, n_points : int, optional + `n_estimates` (default: 8) statistically independent QMC samples, each + of `n_points` (default: 1024) points, will be generated by `qrng`. + The total number of points at which the integrand `func` will be + evaluated is ``n_points * n_estimates``. See Notes for details. + qrng : `~scipy.stats.qmc.QMCEngine`, optional + An instance of the QMCEngine from which to sample QMC points. + The QMCEngine must be initialized to a number of dimensions ``d`` + corresponding with the number of variables ``x1, ..., xd`` passed to + `func`. + The provided QMCEngine is used to produce the first integral estimate. + If `n_estimates` is greater than one, additional QMCEngines are + spawned from the first (with scrambling enabled, if it is an option). + If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton` + will be initialized with the number of dimensions determined from + the length of `a`. + log : boolean, default: False + When set to True, `func` returns the log of the integrand, and + the result object contains the log of the integral. + + Returns + ------- + result : object + A result object with attributes: + + integral : float + The estimate of the integral. + standard_error : float + The error estimate. See Notes for interpretation. + + Notes + ----- + Values of the integrand at each of the `n_points` points of a QMC sample + are used to produce an estimate of the integral. This estimate is drawn + from a population of possible estimates of the integral, the value + obtained depending on the particular points at which the integrand was + evaluated. We perform this process `n_estimates` times, each time + evaluating the integrand at different scrambled QMC points, effectively + drawing i.i.d. random samples from the population of integral estimates. + The sample mean :math:`m` of these integral estimates is an + unbiased estimator of the true value of the integral, and the standard + error of the mean :math:`s` of these estimates may be used to generate + confidence intervals using the t distribution with ``n_estimates - 1`` + degrees of freedom. Perhaps counter-intuitively, increasing `n_points` + while keeping the total number of function evaluation points + ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas + increasing `n_estimates` tends to decrease the error estimate. 
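+ As implemented here, the reported standard error is the sample standard + deviation of the ``n_estimates`` integral estimates (computed with + ``ddof=1``) divided by ``sqrt(n_estimates)``; when ``log=True``, the + log-space analog of this quantity is reported instead. 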
+ + Examples + -------- + QMC quadrature is particularly useful for computing integrals in higher + dimensions. An example integrand is the probability density function + of a multivariate normal distribution. + + >>> import numpy as np + >>> from scipy import stats + >>> dim = 8 + >>> mean = np.zeros(dim) + >>> cov = np.eye(dim) + >>> def func(x): + ... # `multivariate_normal` expects the _last_ axis to correspond with + ... # the dimensionality of the space, so `x` must be transposed + ... return stats.multivariate_normal.pdf(x.T, mean, cov) + + To compute the integral over the unit hypercube: + + >>> from scipy.integrate import qmc_quad + >>> a = np.zeros(dim) + >>> b = np.ones(dim) + >>> rng = np.random.default_rng() + >>> qrng = stats.qmc.Halton(d=dim, seed=rng) + >>> n_estimates = 8 + >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng) + >>> res.integral, res.standard_error + (0.00018429555666024108, 1.0389431116001344e-07) + + A two-sided, 99% confidence interval for the integral may be estimated + as: + + >>> t = stats.t(df=n_estimates-1, loc=res.integral, + ... scale=res.standard_error) + >>> t.interval(0.99) + (0.0001839319802536469, 0.00018465913306683527) + + Indeed, the value reported by `scipy.stats.multivariate_normal` is + within this range. + + >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a) + 0.00018430867675187443 + + """ + args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log) + func, a, b, n_points, n_estimates, qrng, rng, log, stats = args + + def sum_product(integrands, dA, log=False): + if log: + return logsumexp(integrands) + np.log(dA) + else: + return np.sum(integrands * dA) + + def mean(estimates, log=False): + if log: + return logsumexp(estimates) - np.log(n_estimates) + else: + return np.mean(estimates) + + def std(estimates, m=None, ddof=0, log=False): + m = m or mean(estimates, log) + if log: + estimates, m = np.broadcast_arrays(estimates, m) + temp = np.vstack((estimates, m + np.pi * 1j)) + diff = logsumexp(temp, axis=0) + return np.real(0.5 * (logsumexp(2 * diff) + - np.log(n_estimates - ddof))) + else: + return np.std(estimates, ddof=ddof) + + def sem(estimates, m=None, s=None, log=False): + m = m or mean(estimates, log) + s = s or std(estimates, m, ddof=1, log=log) + if log: + return s - 0.5*np.log(n_estimates) + else: + return s / np.sqrt(n_estimates) + + # The sign of the integral depends on the order of the limits. Fix this by + # ensuring that lower bounds are indeed lower and setting sign of resulting + # integral manually + if np.any(a == b): + message = ("A lower limit was equal to an upper limit, so the value " + "of the integral is zero by definition.") + warnings.warn(message, stacklevel=2) + return QMCQuadResult(-np.inf if log else 0, 0) + + i_swap = b < a + sign = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + + A = np.prod(b - a) + dA = A / n_points + + estimates = np.zeros(n_estimates) + rngs = _rng_spawn(qrng.rng, n_estimates) + for i in range(n_estimates): + # Generate integral estimate + sample = qrng.random(n_points) + # The rationale for transposing is that this allows users to easily + # unpack `x` into separate variables, if desired. This is consistent + # with the `xx` array passed into the `scipy.integrate.nquad` `func`. 
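+ # `stats.qmc.scale` linearly maps the sample from the unit hypercube onto the box bounded by `a` and `b`. 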
+ x = stats.qmc.scale(sample, a, b).T # (n_dim, n_points) + integrands = func(x) + estimates[i] = sum_product(integrands, dA, log) + + # Get a new, independently-scrambled QRNG for next time + qrng = type(qrng)(seed=rngs[i], **qrng._init_quad) + + integral = mean(estimates, log) + standard_error = sem(estimates, m=integral, log=log) + integral = integral + np.pi*1j if (log and sign < 0) else integral*sign + return QMCQuadResult(integral, standard_error) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py new file mode 100644 index 0000000000000000000000000000000000000000..28f17cc65bc561f389640805969de3eb7c36a784 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py @@ -0,0 +1,1231 @@ +# mypy: disable-error-code="attr-defined" +import numpy as np +from scipy import special +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +# todo: +# figure out warning situation +# address https://github.com/scipy/scipy/pull/18650#discussion_r1233032521 +# without `minweight`, we are also suppressing infinities within the interval. +# Is that OK? If so, we can probably get rid of `status=3`. +# Add heuristic to stop when improvement is too slow / antithrashing +# support singularities? interval subdivision? this feature will be added +# eventually, but do we adjust the interface now? +# When doing log-integration, should the tolerances control the error of the +# log-integral or the error of the integral? The trouble is that `log` +# inherently loses some precision so it may not be possible to refine +# the integral further. Example: 7th moment of stats.f(15, 20) +# respect function evaluation limit? +# make public? + + +def _tanhsinh(f, a, b, *, args=(), log=False, maxfun=None, maxlevel=None, + minlevel=2, atol=None, rtol=None, preserve_shape=False, + callback=None): + """Evaluate a convergent integral numerically using tanh-sinh quadrature. + + In practice, tanh-sinh quadrature achieves quadratic convergence for + many integrands: the number of accurate *digits* scales roughly linearly + with the number of function evaluations [1]_. + + Either or both of the limits of integration may be infinite, and + singularities at the endpoints are acceptable. Divergent integrals and + integrands with non-finite derivatives or singularities within an interval + are out of scope, but the latter may be evaluated by calling `_tanhsinh` on + each sub-interval separately. + + Parameters + ---------- + f : callable + The function to be integrated. The signature must be:: + func(x: ndarray, *fargs) -> ndarray + where each element of ``x`` is a finite real and ``fargs`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise-scalar function; see + documentation of parameter `preserve_shape` for details. + If ``func`` returns a value with complex dtype when evaluated at + either endpoint, subsequent arguments ``x`` will have complex dtype + (but zero imaginary part). + a, b : array_like + Real lower and upper limits of integration. Must be broadcastable. + Elements may be infinite. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `a` and `b`. 
If the callable to be integrated + requires arguments that are not broadcastable with `a` and `b`, wrap + that callable with `f`. See Examples. + log : bool, default: False + Setting to True indicates that `f` returns the log of the integrand + and that `atol` and `rtol` are expressed as the logs of the absolute + and relative errors. In this case, the result object will contain the + log of the integral and error. This is useful for integrands for which + numerical underflow or overflow would lead to inaccuracies. + When ``log=True``, the integrand (the exponential of `f`) must be real, + but it may be negative, in which case the log of the integrand is a + complex number with an imaginary part that is an odd multiple of π. + maxlevel : int, default: 10 + The maximum refinement level of the algorithm. + + At the zeroth level, `f` is called once, performing 16 function + evaluations. At each subsequent level, `f` is called once more, + approximately doubling the number of function evaluations that have + been performed. Accordingly, for many integrands, each successive level + will double the number of accurate digits in the result (up to the + limits of floating point precision). + + The algorithm will terminate after completing level `maxlevel` or after + another termination condition is satisfied, whichever comes first. + minlevel : int, default: 2 + The level at which to begin iteration (default: 2). This does not + change the total number of function evaluations or the abscissae at + which the function is evaluated; it changes only the *number of times* + `f` is called. If ``minlevel=k``, then the integrand is evaluated at + all abscissae from levels ``0`` through ``k`` in a single call. + Note that if `minlevel` exceeds `maxlevel`, the provided `minlevel` is + ignored, and `minlevel` is set equal to `maxlevel`. + atol, rtol : float, optional + Absolute termination tolerance (default: 0) and relative termination + tolerance (default: ``eps**0.75``, where ``eps`` is the precision of + the result dtype), respectively. The error estimate is as + described in [1]_ Section 5. While not theoretically rigorous or + conservative, it is said to work well in practice. Must be non-negative + and finite if `log` is False, and must be expressed as the log of a + non-negative and finite number if `log` is True. + preserve_shape : bool, default: False + In the following, "arguments of `f`" refers to the array ``x`` and + any arrays within ``fargs``. Let ``shape`` be the broadcasted shape + of `a`, `b`, and all elements of `args` (which is conceptually + distinct from ``fargs`` passed into `f`). + + - When ``preserve_shape=False`` (default), `f` must accept arguments + of *any* broadcastable shapes. + + - When ``preserve_shape=True``, `f` must accept arguments of shape + ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of + abscissae at which the function is being evaluated. + + In either case, for each scalar element ``xi`` within `x`, the array + returned by `f` must include the scalar ``f(xi)`` at the same index. + Consequently, the shape of the output is always the shape of the input + ``x``. + + See Examples. + + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_differentiate` (but containing the + current iterate's values of all variables). 
If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_tanhsinh` will return a result object. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : (unused) + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + integral : float + An estimate of the integral + error : float + An estimate of the error. Only available if level two or higher + has been completed; otherwise NaN. + maxlevel : int + The maximum refinement level used. + nfev : int + The number of points at which `func` was evaluated. + + See Also + -------- + quad, quadrature + + Notes + ----- + Implements the algorithm as described in [1]_ with minor adaptations for + finite-precision arithmetic, including some described by [2]_ and [3]_. The + tanh-sinh scheme was originally introduced in [4]_. + + Due to floating-point error in the abscissae, the function may be evaluated + at the endpoints of the interval during iterations. The values returned by + the function at the endpoints will be ignored. + + References + ---------- + [1] Bailey, David H., Karthik Jeyabalan, and Xiaoye S. Li. "A comparison of + three high-precision quadrature schemes." Experimental Mathematics 14.3 + (2005): 317-329. + [2] Vanherck, Joren, Bart Sorée, and Wim Magnus. "Tanh-sinh quadrature for + single and multiple integration using floating-point arithmetic." + arXiv preprint arXiv:2007.15057 (2020). + [3] van Engelen, Robert A. "Improving the Double Exponential Quadrature + Tanh-Sinh, Sinh-Sinh and Exp-Sinh Formulas." + https://www.genivia.com/files/qthsh.pdf + [4] Takahasi, Hidetosi, and Masatake Mori. "Double exponential formulas for + numerical integration." Publications of the Research Institute for + Mathematical Sciences 9.3 (1974): 721-741. + + Example + ------- + Evaluate the Gaussian integral: + + >>> import numpy as np + >>> from scipy.integrate._tanhsinh import _tanhsinh + >>> def f(x): + ... return np.exp(-x**2) + >>> res = _tanhsinh(f, -np.inf, np.inf) + >>> res.integral # true value is np.sqrt(np.pi), 1.7724538509055159 + 1.7724538509055159 + >>> res.error # actual error is 0 + 4.0007963937534104e-16 + + The value of the Gaussian function (bell curve) is nearly zero for + arguments sufficiently far from zero, so the value of the integral + over a finite interval is nearly the same. + + >>> _tanhsinh(f, -20, 20).integral + 1.772453850905518 + + However, with unfavorable integration limits, the integration scheme + may not be able to find the important region. + + >>> _tanhsinh(f, -np.inf, 1000).integral + 4.500490856620352 + + In such cases, or when there are singularities within the interval, + break the integral into parts with endpoints at the important points. + + >>> _tanhsinh(f, -np.inf, 0).integral + _tanhsinh(f, 0, 1000).integral + 1.772453850905404 + + For integration involving very large or very small magnitudes, use + log-integration. 
(For illustrative purposes, the following example shows a + case in which both regular and log-integration work, but for more extreme + limits of integration, log-integration would avoid the underflow + experienced when evaluating the integral normally.) + + >>> res = _tanhsinh(f, 20, 30, rtol=1e-10) + >>> res.integral, res.error + (4.7819613911309014e-176, 4.670364401645202e-187) + >>> def log_f(x): + ... return -x**2 + >>> res = _tanhsinh(log_f, 20, 30, log=True) + >>> np.exp(res.integral), np.exp(res.error) + (4.7819613911306924e-176, 4.670364401645093e-187) + + The limits of integration and elements of `args` may be broadcastable + arrays, and integration is performed elementwise. + + >>> from scipy import stats + >>> dist = stats.gausshyper(13.8, 3.12, 2.51, 5.18) + >>> a, b = dist.support() + >>> x = np.linspace(a, b, 100) + >>> res = _tanhsinh(dist.pdf, a, x) + >>> ref = dist.cdf(x) + >>> np.allclose(res.integral, ref) + True + + By default, `preserve_shape` is False, and therefore the callable + `f` may be called with arrays of any broadcastable shapes. + For example: + + >>> shapes = [] + >>> def f(x, c): + ... shape = np.broadcast_shapes(x.shape, c.shape) + ... shapes.append(shape) + ... return np.sin(c*x) + >>> + >>> c = [1, 10, 30, 100] + >>> res = _tanhsinh(f, 0, 1, args=(c,), minlevel=1) + >>> shapes + [(4,), (4, 66), (3, 64), (2, 128), (1, 256)] + + To understand where these shapes are coming from - and to better + understand how `_tanhsinh` computes accurate results - note that + higher values of ``c`` correspond with higher frequency sinusoids. + The higher frequency sinusoids make the integrand more complicated, + so more function evaluations are required to achieve the target + accuracy: + + >>> res.nfev + array([ 67, 131, 259, 515]) + + The initial ``shape``, ``(4,)``, corresponds with evaluating the + integrand at a single abscissa and all four frequencies; this is used + for input validation and to determine the size and dtype of the arrays + that store results. The next shape corresponds with evaluating the + integrand at an initial grid of abscissae and all four frequencies. + Successive calls to the function double the total number of abscissae at + which the function has been evaluated. However, in later function + evaluations, the integrand is evaluated at fewer frequencies because + the corresponding integral has already converged to the required + tolerance. This saves function evaluations to improve performance, but + it requires the function to accept arguments of any shape. + + "Vector-valued" integrands, such as those written for use with + `scipy.integrate.quad_vec`, are unlikely to satisfy this requirement. + For example, consider + + >>> def f(x): + ... return [x, np.sin(10*x), np.cos(30*x), x*np.sin(100*x)**2] + + This integrand is not compatible with `_tanhsinh` as written; for instance, + the shape of the output will not be the same as the shape of ``x``. Such a + function *could* be converted to a compatible form with the introduction of + additional parameters, but this would be inconvenient. In such cases, + a simpler solution would be to use `preserve_shape`. + + >>> shapes = [] + >>> def f(x): + ... shapes.append(x.shape) + ... x0, x1, x2, x3 = x + ... return [x0, np.sin(10*x1), np.cos(30*x2), x3*np.sin(100*x3)**2] + >>> + >>> a = np.zeros(4) + >>> res = _tanhsinh(f, a, 1, preserve_shape=True) + >>> shapes + [(4,), (4, 66), (4, 64), (4, 128), (4, 256)] + + Here, the broadcasted shape of `a` and `b` is ``(4,)``. 
With + ``preserve_shape=True``, the function may be called with argument + ``x`` of shape ``(4,)`` or ``(4, n)``, and this is what we observe. + + """ + (f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback) = _tanhsinh_iv( + f, a, b, log, maxfun, maxlevel, minlevel, atol, + rtol, args, preserve_shape, callback) + + # Initialization + # `eim._initialize` does several important jobs, including + # ensuring that limits, each of the `args`, and the output of `f` + # broadcast correctly and are of consistent types. To save a function + # evaluation, I pass the midpoint of the integration interval. This comes + # at a cost of some gymnastics to ensure that the midpoint has the right + # shape and dtype. Did you know that 0d and >0d arrays follow different + # type promotion rules? + with np.errstate(over='ignore', invalid='ignore', divide='ignore'): + c = ((a.ravel() + b.ravel())/2).reshape(a.shape) + inf_a, inf_b = np.isinf(a), np.isinf(b) + c[inf_a] = b[inf_a] - 1 # takes care of infinite a + c[inf_b] = a[inf_b] + 1 # takes care of infinite b + c[inf_a & inf_b] = 0 # takes care of infinite a and b + temp = eim._initialize(f, (c,), args, complex_ok=True, + preserve_shape=preserve_shape) + f, xs, fs, args, shape, dtype, xp = temp + a = np.broadcast_to(a, shape).astype(dtype).ravel() + b = np.broadcast_to(b, shape).astype(dtype).ravel() + + # Transform improper integrals + a, b, a0, negative, abinf, ainf, binf = _transform_integrals(a, b) + + # Define variables we'll need + nit, nfev = 0, 1 # one function evaluation performed above + zero = -np.inf if log else 0 + pi = dtype.type(np.pi) + maxiter = maxlevel - minlevel + 1 + eps = np.finfo(dtype).eps + if rtol is None: + rtol = 0.75*np.log(eps) if log else eps**0.75 + + Sn = np.full(shape, zero, dtype=dtype).ravel() # latest integral estimate + Sn[np.isnan(a) | np.isnan(b) | np.isnan(fs[0])] = np.nan + Sk = np.empty_like(Sn).reshape(-1, 1)[:, 0:0] # all integral estimates + aerr = np.full(shape, np.nan, dtype=dtype).ravel() # absolute error + status = np.full(shape, eim._EINPROGRESS, dtype=int).ravel() + h0 = np.real(_get_base_step(dtype=dtype)) # base step + + # For term `d4` of error estimate ([1] Section 5), we need to keep the + # most extreme abscissae and corresponding `fj`s, `wj`s in Euler-Maclaurin + # sum. Here, we initialize these variables. + xr0 = np.full(shape, -np.inf, dtype=dtype).ravel() + fr0 = np.full(shape, np.nan, dtype=dtype).ravel() + wr0 = np.zeros(shape, dtype=dtype).ravel() + xl0 = np.full(shape, np.inf, dtype=dtype).ravel() + fl0 = np.full(shape, np.nan, dtype=dtype).ravel() + wl0 = np.zeros(shape, dtype=dtype).ravel() + d4 = np.zeros(shape, dtype=dtype).ravel() + + work = _RichResult( + Sn=Sn, Sk=Sk, aerr=aerr, h=h0, log=log, dtype=dtype, pi=pi, eps=eps, + a=a.reshape(-1, 1), b=b.reshape(-1, 1), # integration limits + n=minlevel, nit=nit, nfev=nfev, status=status, # iter/eval counts + xr0=xr0, fr0=fr0, wr0=wr0, xl0=xl0, fl0=fl0, wl0=wl0, d4=d4, # err est + ainf=ainf, binf=binf, abinf=abinf, a0=a0.reshape(-1, 1)) # transforms + # Constant scalars don't need to be put in `work` unless they need to be + # passed outside `tanhsinh`. Examples: atol, rtol, h0, minlevel. 
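+ # `work` carries the per-element iteration state; `eim._loop` below passes it to each of the callbacks defined next. 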
+ + # Correspondence between terms in the `work` object and the result + res_work_pairs = [('status', 'status'), ('integral', 'Sn'), + ('error', 'aerr'), ('nit', 'nit'), ('nfev', 'nfev')] + + def pre_func_eval(work): + # Determine abscissae at which to evaluate `f` + work.h = h0 / 2**work.n + xjc, wj = _get_pairs(work.n, h0, dtype=work.dtype, + inclusive=(work.n == minlevel)) + work.xj, work.wj = _transform_to_limits(xjc, wj, work.a, work.b) + + # Perform abscissae substitutions for infinite limits of integration + xj = work.xj.copy() + xj[work.abinf] = xj[work.abinf] / (1 - xj[work.abinf]**2) + xj[work.binf] = 1/xj[work.binf] - 1 + work.a0[work.binf] + xj[work.ainf] *= -1 + return xj + + def post_func_eval(x, fj, work): + # Weight integrand as required by substitutions for infinite limits + if work.log: + fj[work.abinf] += (np.log(1 + work.xj[work.abinf] ** 2) + - 2*np.log(1 - work.xj[work.abinf] ** 2)) + fj[work.binf] -= 2 * np.log(work.xj[work.binf]) + else: + fj[work.abinf] *= ((1 + work.xj[work.abinf]**2) / + (1 - work.xj[work.abinf]**2)**2) + fj[work.binf] *= work.xj[work.binf]**-2. + + # Estimate integral with Euler-Maclaurin Sum + fjwj, Sn = _euler_maclaurin_sum(fj, work) + if work.Sk.shape[-1]: + Snm1 = work.Sk[:, -1] + Sn = (special.logsumexp([Snm1 - np.log(2), Sn], axis=0) if log + else Snm1 / 2 + Sn) + + work.fjwj = fjwj + work.Sn = Sn + + def check_termination(work): + """Terminate due to convergence or encountering non-finite values""" + stop = np.zeros(work.Sn.shape, dtype=bool) + + # Terminate before first iteration if integration limits are equal + if work.nit == 0: + i = (work.a == work.b).ravel() # ravel singleton dimension + zero = -np.inf if log else 0 + work.Sn[i] = zero + work.aerr[i] = zero + work.status[i] = eim._ECONVERGED + stop[i] = True + else: + # Terminate if convergence criterion is met + work.rerr, work.aerr = _estimate_error(work) + i = ((work.rerr < rtol) | (work.rerr + np.real(work.Sn) < atol) if log + else (work.rerr < rtol) | (work.rerr * abs(work.Sn) < atol)) + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Terminate if integral estimate becomes invalid + if log: + i = (np.isposinf(np.real(work.Sn)) | np.isnan(work.Sn)) & ~stop + else: + i = ~np.isfinite(work.Sn) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + work.n += 1 + work.Sk = np.concatenate((work.Sk, work.Sn[:, np.newaxis]), axis=-1) + return + + def customize_result(res, shape): + # If the integration limits were such that b < a, we reversed them + # to perform the calculation, and the final result needs to be negated. + if log and np.any(negative): + pi = res['integral'].dtype.type(np.pi) + j = np.complex64(1j) # minimum complex type + res['integral'] = res['integral'] + negative*pi*j + else: + res['integral'][negative] *= -1 + + # For this algorithm, it seems more appropriate to report the maximum + # level rather than the number of iterations in which it was performed. + res['maxlevel'] = minlevel + res['nit'] - 1 + res['maxlevel'][res['nit'] == 0] = -1 + del res['nit'] + return shape + + # Suppress all warnings initially, since there are many places in the code + # for which this is expected behavior. 
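+ # Note that `np.errstate` silences only NumPy floating-point warnings (overflow, invalid, divide); warnings raised explicitly by `f` or by this module still propagate. 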
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'): + res = eim._loop(work, callback, shape, maxiter, f, args, dtype, pre_func_eval, + post_func_eval, check_termination, post_termination_check, + customize_result, res_work_pairs, xp, preserve_shape) + return res + + +def _get_base_step(dtype=np.float64): + # Compute the base step length for the provided dtype. Theoretically, the + # Euler-Maclaurin sum is infinite, but it gets cut off when either the + # weights underflow or the abscissae cannot be distinguished from the + # limits of integration. The latter happens to occur first for float32 and + # float64, and it occurs when `xjc` (the abscissa complement) + # in `_compute_pair` underflows. We can solve for the argument `tmax` at + # which it will underflow using [2] Eq. 13. + fmin = 4*np.finfo(dtype).tiny # stay a little away from the limit + tmax = np.arcsinh(np.log(2/fmin - 1) / np.pi) + + # Based on this, we can choose a base step size `h` for level 0. + # The number of function evaluations will be `2 + m*2^(k+1)`, where `k` is + # the level and `m` is an integer we get to choose. I choose + # m = _N_BASE_STEPS = `8` somewhat arbitrarily, but a rationale is that a + # power of 2 makes floating point arithmetic more predictable. It also + # results in a base step size close to `1`, which is what [1] uses (and I + # used here until I found [2] and these ideas settled). + h0 = tmax / _N_BASE_STEPS + return h0.astype(dtype) + + +_N_BASE_STEPS = 8 + + +def _compute_pair(k, h0): + # Compute the abscissa-weight pairs for each level k. See [1] page 9. + + # For now, we compute and store in 64-bit precision. If higher-precision + # data types become better supported, it would be good to compute these + # using the highest precision available. Or, once there is an Array API- + # compatible arbitrary precision array, we can compute at the required + # precision. + + # "....each level k of abscissa-weight pairs uses h = 2 **-k" + # We adapt to floating point arithmetic using ideas of [2]. + h = h0 / 2**k + max = _N_BASE_STEPS * 2**k + + # For iterations after the first, "....the integrand function needs to be + # evaluated only at the odd-indexed abscissas at each level." + j = np.arange(max+1) if k == 0 else np.arange(1, max+1, 2) + jh = j * h + + # "In this case... the weights wj = u1/cosh(u2)^2, where..." + pi_2 = np.pi / 2 + u1 = pi_2*np.cosh(jh) + u2 = pi_2*np.sinh(jh) + # Denominators get big here. Overflow then underflow doesn't need warning. + # with np.errstate(under='ignore', over='ignore'): + wj = u1 / np.cosh(u2)**2 + # "We actually store 1-xj = 1/(...)." + xjc = 1 / (np.exp(u2) * np.cosh(u2)) # complement of xj = np.tanh(u2) + + # When level k == 0, the zeroth xj corresponds with xj = 0. To simplify + # code, the function will be evaluated there twice; each gets half weight. + wj[0] = wj[0] / 2 if k == 0 else wj[0] + + return xjc, wj # store at full precision + + +def _pair_cache(k, h0): + # Cache the abscissa-weight pairs up to a specified level. + # Abscissae and weights of consecutive levels are concatenated. + # `indices` records the indices that correspond with each level: + # `xjc[indices[k]:indices[k+1]]` extracts the level `k` abscissae. 
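+ # A change in `h0` (e.g. because a different dtype is in use) invalidates the cached pairs, so reset the cache before extending it. 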
+ if h0 != _pair_cache.h0: + _pair_cache.xjc = np.empty(0) + _pair_cache.wj = np.empty(0) + _pair_cache.indices = [0] + + xjcs = [_pair_cache.xjc] + wjs = [_pair_cache.wj] + + for i in range(len(_pair_cache.indices)-1, k + 1): + xjc, wj = _compute_pair(i, h0) + xjcs.append(xjc) + wjs.append(wj) + _pair_cache.indices.append(_pair_cache.indices[-1] + len(xjc)) + + _pair_cache.xjc = np.concatenate(xjcs) + _pair_cache.wj = np.concatenate(wjs) + _pair_cache.h0 = h0 + +_pair_cache.xjc = np.empty(0) +_pair_cache.wj = np.empty(0) +_pair_cache.indices = [0] +_pair_cache.h0 = None + + +def _get_pairs(k, h0, inclusive=False, dtype=np.float64): + # Retrieve the specified abscissa-weight pairs from the cache + # If `inclusive`, return all up to and including the specified level + if len(_pair_cache.indices) <= k+2 or h0 != _pair_cache.h0: + _pair_cache(k, h0) + + xjc = _pair_cache.xjc + wj = _pair_cache.wj + indices = _pair_cache.indices + + start = 0 if inclusive else indices[k] + end = indices[k+1] + + return xjc[start:end].astype(dtype), wj[start:end].astype(dtype) + + +def _transform_to_limits(xjc, wj, a, b): + # Transform integral according to user-specified limits. This is just + # math that follows from the fact that the standard limits are (-1, 1). + # Note: If we had stored xj instead of xjc, we would have + # xj = alpha * xj + beta, where beta = (a + b)/2 + alpha = (b - a) / 2 + xj = np.concatenate((-alpha * xjc + b, alpha * xjc + a), axis=-1) + wj = wj*alpha # arguments get broadcasted, so we can't use *= + wj = np.concatenate((wj, wj), axis=-1) + + # Points at the boundaries can be generated due to finite precision + # arithmetic, but these function values aren't supposed to be included in + # the Euler-Maclaurin sum. Ideally we wouldn't evaluate the function at + # these points; however, we can't easily filter out points since this + # function is vectorized. Instead, zero the weights. + invalid = (xj <= a) | (xj >= b) + wj[invalid] = 0 + return xj, wj + + +def _euler_maclaurin_sum(fj, work): + # Perform the Euler-Maclaurin Sum, [1] Section 4 + + # The error estimate needs to know the magnitude of the last term + # omitted from the Euler-Maclaurin sum. This is a bit involved because + # it may have been computed at a previous level. I sure hope it's worth + # all the trouble. + xr0, fr0, wr0 = work.xr0, work.fr0, work.wr0 + xl0, fl0, wl0 = work.xl0, work.fl0, work.wl0 + + # It is much more convenient to work with the transposes of our work + # variables here. 
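+ # After the transpose, axis 0 runs over the abscissae and axis 1 over the active array elements. 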
+ xj, fj, wj = work.xj.T, fj.T, work.wj.T + n_x, n_active = xj.shape # number of abscissae, number of active elements + + # We'll work with the left and right sides separately + xr, xl = xj.reshape(2, n_x // 2, n_active).copy() # this gets modified + fr, fl = fj.reshape(2, n_x // 2, n_active) + wr, wl = wj.reshape(2, n_x // 2, n_active) + + invalid_r = ~np.isfinite(fr) | (wr == 0) + invalid_l = ~np.isfinite(fl) | (wl == 0) + + # integer index of the maximum abscissa at this level + xr[invalid_r] = -np.inf + ir = np.argmax(xr, axis=0, keepdims=True) + # abscissa, function value, and weight at this index + xr_max = np.take_along_axis(xr, ir, axis=0)[0] + fr_max = np.take_along_axis(fr, ir, axis=0)[0] + wr_max = np.take_along_axis(wr, ir, axis=0)[0] + # boolean indices at which maximum abscissa at this level exceeds + # the incumbent maximum abscissa (from all previous levels) + j = xr_max > xr0 + # Update record of the incumbent abscissa, function value, and weight + xr0[j] = xr_max[j] + fr0[j] = fr_max[j] + wr0[j] = wr_max[j] + + # integer index of the minimum abscissa at this level + xl[invalid_l] = np.inf + il = np.argmin(xl, axis=0, keepdims=True) + # abscissa, function value, and weight at this index + xl_min = np.take_along_axis(xl, il, axis=0)[0] + fl_min = np.take_along_axis(fl, il, axis=0)[0] + wl_min = np.take_along_axis(wl, il, axis=0)[0] + # boolean indices at which minimum abscissa at this level is less than + # the incumbent minimum abscissa (from all previous levels) + j = xl_min < xl0 + # Update record of the incumbent abscissa, function value, and weight + xl0[j] = xl_min[j] + fl0[j] = fl_min[j] + wl0[j] = wl_min[j] + fj = fj.T + + # Compute the error estimate `d4` - the magnitude of the leftmost or + # rightmost term, whichever is greater. + flwl0 = fl0 + np.log(wl0) if work.log else fl0 * wl0 # leftmost term + frwr0 = fr0 + np.log(wr0) if work.log else fr0 * wr0 # rightmost term + magnitude = np.real if work.log else np.abs + work.d4 = np.maximum(magnitude(flwl0), magnitude(frwr0)) + + # There are two approaches to dealing with function values that are + # numerically infinite due to approaching a singularity - zero them, or + # replace them with the function value at the nearest non-infinite point. + # [3] pg. 22 suggests the latter, so let's do that given that we have the + # information. + fr0b = np.broadcast_to(fr0[np.newaxis, :], fr.shape) + fl0b = np.broadcast_to(fl0[np.newaxis, :], fl.shape) + fr[invalid_r] = fr0b[invalid_r] + fl[invalid_l] = fl0b[invalid_l] + + # When wj is zero, log emits a warning + # with np.errstate(divide='ignore'): + fjwj = fj + np.log(work.wj) if work.log else fj * work.wj + + # update integral estimate + Sn = (special.logsumexp(fjwj + np.log(work.h), axis=-1) if work.log + else np.sum(fjwj, axis=-1) * work.h) + + work.xr0, work.fr0, work.wr0 = xr0, fr0, wr0 + work.xl0, work.fl0, work.wl0 = xl0, fl0, wl0 + + return fjwj, Sn + + +def _estimate_error(work): + # Estimate the error according to [1] Section 5 + + if work.n == 0 or work.nit == 0: + # The paper says to use "one" as the error before it can be calculated. + # NaN seems to be more appropriate. + nan = np.full_like(work.Sn, np.nan) + return nan, nan + + indices = _pair_cache.indices + + n_active = len(work.Sn) # number of active elements + axis_kwargs = dict(axis=-1, keepdims=True) + + # With a jump start (starting at level higher than 0), we haven't + # explicitly calculated the integral estimate at lower levels. 
But we have + # all the function value-weight products, so we can compute the + # lower-level estimates. + if work.Sk.shape[-1] == 0: + h = 2 * work.h # step size at this level + n_x = indices[work.n] # number of abscissa up to this level + # The right and left fjwj terms from all levels are concatenated along + # the last axis. Get out only the terms up to this level. + fjwj_rl = work.fjwj.reshape(n_active, 2, -1) + fjwj = fjwj_rl[:, :, :n_x].reshape(n_active, 2*n_x) + # Compute the Euler-Maclaurin sum at this level + Snm1 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log + else np.sum(fjwj, **axis_kwargs) * h) + work.Sk = np.concatenate((Snm1, work.Sk), axis=-1) + + if work.n == 1: + nan = np.full_like(work.Sn, np.nan) + return nan, nan + + # The paper says not to calculate the error for n<=2, but it's not clear + # about whether it starts at level 0 or level 1. We start at level 0, so + # why not compute the error beginning in level 2? + if work.Sk.shape[-1] < 2: + h = 4 * work.h # step size at this level + n_x = indices[work.n-1] # number of abscissa up to this level + # The right and left fjwj terms from all levels are concatenated along + # the last axis. Get out only the terms up to this level. + fjwj_rl = work.fjwj.reshape(len(work.Sn), 2, -1) + fjwj = fjwj_rl[..., :n_x].reshape(n_active, 2*n_x) + # Compute the Euler-Maclaurin sum at this level + Snm2 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log + else np.sum(fjwj, **axis_kwargs) * h) + work.Sk = np.concatenate((Snm2, work.Sk), axis=-1) + + Snm2 = work.Sk[..., -2] + Snm1 = work.Sk[..., -1] + + e1 = work.eps + + if work.log: + log_e1 = np.log(e1) + # Currently, only real integrals are supported in log-scale. All + # complex values have imaginary part in increments of pi*j, which just + # carries sign information of the original integral, so use of + # `np.real` here is equivalent to absolute value in real scale. + d1 = np.real(special.logsumexp([work.Sn, Snm1 + work.pi*1j], axis=0)) + d2 = np.real(special.logsumexp([work.Sn, Snm2 + work.pi*1j], axis=0)) + d3 = log_e1 + np.max(np.real(work.fjwj), axis=-1) + d4 = work.d4 + aerr = np.max([d1 ** 2 / d2, 2 * d1, d3, d4], axis=0) + rerr = np.maximum(log_e1, aerr - np.real(work.Sn)) + else: + # Note: explicit computation of log10 of each of these is unnecessary. + d1 = np.abs(work.Sn - Snm1) + d2 = np.abs(work.Sn - Snm2) + d3 = e1 * np.max(np.abs(work.fjwj), axis=-1) + d4 = work.d4 + # If `d1` is 0, no need to warn. This does the right thing. 
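+ # In the non-log branch, `d1**(np.log(d1)/np.log(d2))` implements the [1] Section 5 heuristic exp((log d1)**2 / log d2), extrapolating the last two level-to-level differences. 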
+ # with np.errstate(divide='ignore'): + aerr = np.max([d1**(np.log(d1)/np.log(d2)), d1**2, d3, d4], axis=0) + rerr = np.maximum(e1, aerr/np.abs(work.Sn)) + return rerr, aerr.reshape(work.Sn.shape) + + +def _transform_integrals(a, b): + # Transform integrals to a form with finite a < b + # For b < a, we reverse the limits and will multiply the final result by -1 + # For infinite limit on the right, we use the substitution x = 1/t - 1 + a + # For infinite limit on the left, we substitute x = -x and treat as above + # For infinite limits, we substitute x = t / (1-t**2) + + negative = b < a + a[negative], b[negative] = b[negative], a[negative] + + abinf = np.isinf(a) & np.isinf(b) + a[abinf], b[abinf] = -1, 1 + + ainf = np.isinf(a) + a[ainf], b[ainf] = -b[ainf], -a[ainf] + + binf = np.isinf(b) + a0 = a.copy() + a[binf], b[binf] = 0, 1 + + return a, b, a0, negative, abinf, ainf, binf + + +def _tanhsinh_iv(f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback): + # Input validation and standardization + + message = '`f` must be callable.' + if not callable(f): + raise ValueError(message) + + message = 'All elements of `a` and `b` must be real numbers.' + a, b = np.broadcast_arrays(a, b) + if np.any(np.iscomplex(a)) or np.any(np.iscomplex(b)): + raise ValueError(message) + + message = '`log` must be True or False.' + if log not in {True, False}: + raise ValueError(message) + log = bool(log) + + if atol is None: + atol = -np.inf if log else 0 + + rtol_temp = rtol if rtol is not None else 0. + + params = np.asarray([atol, rtol_temp, 0.]) + message = "`atol` and `rtol` must be real numbers." + if not np.issubdtype(params.dtype, np.floating): + raise ValueError(message) + + if log: + message = '`atol` and `rtol` may not be positive infinity.' + if np.any(np.isposinf(params)): + raise ValueError(message) + else: + message = '`atol` and `rtol` must be non-negative and finite.' + if np.any(params < 0) or np.any(np.isinf(params)): + raise ValueError(message) + atol = params[0] + rtol = rtol if rtol is None else params[1] + + BIGINT = float(2**62) + if maxfun is None and maxlevel is None: + maxlevel = 10 + + maxfun = BIGINT if maxfun is None else maxfun + maxlevel = BIGINT if maxlevel is None else maxlevel + + message = '`maxfun`, `maxlevel`, and `minlevel` must be integers.' + params = np.asarray([maxfun, maxlevel, minlevel]) + if not (np.issubdtype(params.dtype, np.number) + and np.all(np.isreal(params)) + and np.all(params.astype(np.int64) == params)): + raise ValueError(message) + message = '`maxfun`, `maxlevel`, and `minlevel` must be non-negative.' + if np.any(params < 0): + raise ValueError(message) + maxfun, maxlevel, minlevel = params.astype(np.int64) + minlevel = min(minlevel, maxlevel) + + if not np.iterable(args): + args = (args,) + + message = '`preserve_shape` must be True or False.' + if preserve_shape not in {True, False}: + raise ValueError(message) + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return (f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback) + + +def _logsumexp(x, axis=0): + # logsumexp raises with empty array + x = np.asarray(x) + shape = list(x.shape) + if shape[axis] == 0: + shape.pop(axis) + return np.full(shape, fill_value=-np.inf, dtype=x.dtype) + else: + return special.logsumexp(x, axis=axis) + + +def _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol): + # Input validation and standardization + + message = '`f` must be callable.' 
+ if not callable(f): + raise ValueError(message) + + message = 'All elements of `a`, `b`, and `step` must be real numbers.' + a, b, step = np.broadcast_arrays(a, b, step) + dtype = np.result_type(a.dtype, b.dtype, step.dtype) + if not np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.complexfloating): + raise ValueError(message) + + valid_a = np.isfinite(a) + valid_b = b >= a # NaNs will be False + valid_step = np.isfinite(step) & (step > 0) + valid_abstep = valid_a & valid_b & valid_step + + message = '`log` must be True or False.' + if log not in {True, False}: + raise ValueError(message) + + if atol is None: + atol = -np.inf if log else 0 + + rtol_temp = rtol if rtol is not None else 0. + + params = np.asarray([atol, rtol_temp, 0.]) + message = "`atol` and `rtol` must be real numbers." + if not np.issubdtype(params.dtype, np.floating): + raise ValueError(message) + + if log: + message = '`atol`, `rtol` may not be positive infinity or NaN.' + if np.any(np.isposinf(params) | np.isnan(params)): + raise ValueError(message) + else: + message = '`atol`, and `rtol` must be non-negative and finite.' + if np.any((params < 0) | (~np.isfinite(params))): + raise ValueError(message) + atol = params[0] + rtol = rtol if rtol is None else params[1] + + maxterms_int = int(maxterms) + if maxterms_int != maxterms or maxterms < 0: + message = "`maxterms` must be a non-negative integer." + raise ValueError(message) + + if not np.iterable(args): + args = (args,) + + return f, a, b, step, valid_abstep, args, log, maxterms_int, atol, rtol + + +def _nsum(f, a, b, step=1, args=(), log=False, maxterms=int(2**20), atol=None, + rtol=None): + r"""Evaluate a convergent sum. + + For finite `b`, this evaluates:: + + f(a + np.arange(n)*step).sum() + + where ``n = int((b - a) / step) + 1``. If `f` is smooth, positive, and + monotone decreasing, `b` may be infinite, in which case the infinite sum + is approximated using integration. + + Parameters + ---------- + f : callable + The function that evaluates terms to be summed. The signature must be:: + + f(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. `f` must represent a smooth, positive, and monotone decreasing + function of `x`; `_nsum` performs no checks to verify that these conditions + are met and may return erroneous results if they are violated. + a, b : array_like + Real lower and upper limits of summed terms. Must be broadcastable. + Each element of `a` must be finite and less than the corresponding + element in `b`, but elements of `b` may be infinite. + step : array_like + Finite, positive, real step between summed terms. Must be broadcastable + with `a` and `b`. + args : tuple, optional + Additional positional arguments to be passed to `f`. Must be arrays + broadcastable with `a`, `b`, and `step`. If the callable to be summed + requires arguments that are not broadcastable with `a`, `b`, and `step`, + wrap that callable with `f`. See Examples. + log : bool, default: False + Setting to True indicates that `f` returns the log of the terms + and that `atol` and `rtol` are expressed as the logs of the absolute + and relative errors. In this case, the result object will contain the + log of the sum and error. This is useful for summands for which + numerical underflow or overflow would lead to inaccuracies. + maxterms : int, default: 2**32 + The maximum number of terms to evaluate when summing directly. 
+ Additional function evaluations may be performed for input + validation and integral evaluation. + atol, rtol : float, optional + Absolute termination tolerance (default: 0) and relative termination + tolerance (default: ``eps**0.5``, where ``eps`` is the precision of + the result dtype), respectively. Must be non-negative + and finite if `log` is False, and must be expressed as the log of a + non-negative and finite number if `log` is True. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + + arrays of the same shape.) + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : Element(s) of `a`, `b`, or `step` are invalid + ``-2`` : Numerical integration reached its iteration limit; the sum may be divergent. + ``-3`` : A non-finite value was encountered. + sum : float + An estimate of the sum. + error : float + An estimate of the absolute error, assuming all terms are non-negative. + nfev : int + The number of points at which `func` was evaluated. + + See Also + -------- + tanhsinh + + Notes + ----- + The method implemented for infinite summation is related to the integral + test for convergence of an infinite series: assuming `step` size 1 for + simplicity of exposition, the sum of a monotone decreasing function is bounded by + + .. math:: + + \int_u^\infty f(x) dx \leq \sum_{k=u}^\infty f(k) \leq \int_u^\infty f(x) dx + f(u) + + Let :math:`a` represent `a`, :math:`n` represent `maxterms`, :math:`\epsilon_a` + represent `atol`, and :math:`\epsilon_r` represent `rtol`. + The implementation first evaluates the integral :math:`S_l=\int_a^\infty f(x) dx` + as a lower bound of the infinite sum. Then, it seeks a value :math:`c > a` such + that :math:`f(c) < \epsilon_a + S_l \epsilon_r`, if it exists; otherwise, + let :math:`c = a + n`. Then the infinite sum is approximated as + + .. math:: + + \sum_{k=a}^{c-1} f(k) + \int_c^\infty f(x) dx + f(c)/2 + + and the reported error is :math:`f(c)/2` plus the error estimate of + numerical integration. The approach described above is generalized for non-unit + `step` and finite `b` that is too large for direct evaluation of the sum, + i.e. ``b - a + 1 > maxterms``. + + References + ---------- + [1] Wikipedia. "Integral test for convergence." + https://en.wikipedia.org/wiki/Integral_test_for_convergence + + Examples + -------- + Compute the infinite sum of the reciprocals of squared integers. + + >>> import numpy as np + >>> from scipy.integrate._tanhsinh import _nsum + >>> res = _nsum(lambda k: 1/k**2, 1, np.inf, maxterms=1e3) + >>> ref = np.pi**2/6 # true value + >>> res.error # estimated error + 4.990014980029223e-07 + >>> (res.sum - ref)/ref # true error + -1.0101760641302586e-10 + >>> res.nfev # number of points at which callable was evaluated + 1142 + + Compute the infinite sums of the reciprocals of integers raised to powers ``p``. 
+ + >>> from scipy import special + >>> p = np.arange(2, 10) + >>> res = _nsum(lambda k, p: 1/k**p, 1, np.inf, maxterms=1e3, args=(p,)) + >>> ref = special.zeta(p, 1) + >>> np.allclose(res.sum, ref) + True + + """ # noqa: E501 + # Potential future work: + # - more careful testing of when `b` is slightly less than `a` plus an + # integer multiple of step (needed before this is public) + # - improve error estimate of `_direct` sum + # - add other methods for convergence acceleration (Richardson, epsilon) + # - support infinite lower limit? + # - support negative monotone increasing functions? + # - b < a / negative step? + # - complex-valued function? + # - check for violations of monotonicity? + + # Function-specific input validation / standardization + tmp = _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol) + f, a, b, step, valid_abstep, args, log, maxterms, atol, rtol = tmp + + # Additional elementwise algorithm input validation / standardization + tmp = eim._initialize(f, (a,), args, complex_ok=False) + f, xs, fs, args, shape, dtype, xp = tmp + + # Finish preparing `a`, `b`, and `step` arrays + a = xs[0] + b = np.broadcast_to(b, shape).ravel().astype(dtype) + step = np.broadcast_to(step, shape).ravel().astype(dtype) + valid_abstep = np.broadcast_to(valid_abstep, shape).ravel() + nterms = np.floor((b - a) / step) + b = a + nterms*step + + # Define constants + eps = np.finfo(dtype).eps + zero = np.asarray(-np.inf if log else 0, dtype=dtype)[()] + if rtol is None: + rtol = 0.5*np.log(eps) if log else eps**0.5 + constants = (dtype, log, eps, zero, rtol, atol, maxterms) + + # Prepare result arrays + S = np.empty_like(a) + E = np.empty_like(a) + status = np.zeros(len(a), dtype=int) + nfev = np.ones(len(a), dtype=int) # one function evaluation above + + # Branch for direct sum evaluation / integral approximation / invalid input + i1 = (nterms + 1 <= maxterms) & valid_abstep + i2 = (nterms + 1 > maxterms) & valid_abstep + i3 = ~valid_abstep + + if np.any(i1): + args_direct = [arg[i1] for arg in args] + tmp = _direct(f, a[i1], b[i1], step[i1], args_direct, constants) + S[i1], E[i1] = tmp[:-1] + nfev[i1] += tmp[-1] + status[i1] = -3 * (~np.isfinite(S[i1])) + + if np.any(i2): + args_indirect = [arg[i2] for arg in args] + tmp = _integral_bound(f, a[i2], b[i2], step[i2], args_indirect, constants) + S[i2], E[i2], status[i2] = tmp[:-1] + nfev[i2] += tmp[-1] + + if np.any(i3): + S[i3], E[i3] = np.nan, np.nan + status[i3] = -1 + + # Return results + S, E = S.reshape(shape)[()], E.reshape(shape)[()] + status, nfev = status.reshape(shape)[()], nfev.reshape(shape)[()] + return _RichResult(sum=S, error=E, status=status, success=status == 0, + nfev=nfev) + + +def _direct(f, a, b, step, args, constants, inclusive=True): + # Directly evaluate the sum. + + # When used in the context of distributions, `args` would contain the + # distribution parameters. We have broadcasted for simplicity, but we could + # reduce function evaluations when distribution parameters are the same but + # sum limits differ. Roughly: + # - compute the function at all points between min(a) and max(b), + # - compute the cumulative sum, + # - take the difference between elements of the cumulative sum + # corresponding with b and a. + # This is left to future enhancement + + dtype, log, eps, zero, _, _, _ = constants + + # To allow computation in a single vectorized call, find the maximum number + # of points (over all slices) at which the function needs to be evaluated. 
+ # Note: if `inclusive` is `True`, then we want `1` more term in the sum. + # I didn't think it was great style to use `True` as `1` in Python, so I + # explicitly converted it to an `int` before using it. + inclusive_adjustment = int(inclusive) + steps = np.round((b - a) / step) + inclusive_adjustment + # Equivalently, steps = np.round((b - a) / step) + inclusive + max_steps = int(np.max(steps)) + + # In each slice, the function will be evaluated at the same number of points, + # but excessive points (those beyond the right sum limit `b`) are replaced + # with NaN to (potentially) reduce the time of these unnecessary calculations. + # Use a new last axis for these calculations for consistency with other + # elementwise algorithms. + a2, b2, step2 = a[:, np.newaxis], b[:, np.newaxis], step[:, np.newaxis] + args2 = [arg[:, np.newaxis] for arg in args] + ks = a2 + np.arange(max_steps, dtype=dtype) * step2 + i_nan = ks >= (b2 + inclusive_adjustment*step2/2) + ks[i_nan] = np.nan + fs = f(ks, *args2) + + # The function evaluated at NaN is NaN, and NaNs are zeroed in the sum. + # In some cases it may be faster to loop over slices than to vectorize + # like this. This is an optimization that can be added later. + fs[i_nan] = zero + nfev = max_steps - i_nan.sum(axis=-1) + S = _logsumexp(fs, axis=-1) if log else np.sum(fs, axis=-1) + # Rough, non-conservative error estimate. See gh-19667 for improvement ideas. + E = np.real(S) + np.log(eps) if log else eps * abs(S) + return S, E, nfev + + +def _integral_bound(f, a, b, step, args, constants): + # Estimate the sum with integral approximation + dtype, log, _, _, rtol, atol, maxterms = constants + log2 = np.log(2, dtype=dtype) + + # Get a lower bound on the sum and compute effective absolute tolerance + lb = _tanhsinh(f, a, b, args=args, atol=atol, rtol=rtol, log=log) + tol = np.broadcast_to(atol, lb.integral.shape) + tol = _logsumexp((tol, rtol + lb.integral)) if log else tol + rtol*lb.integral + i_skip = lb.status < 0 # avoid unnecessary f_evals if integral is divergent + tol[i_skip] = np.nan + status = lb.status + + # As in `_direct`, we'll need a temporary new axis for points + # at which to evaluate the function. Append axis at the end for + # consistency with other elementwise algorithms. + a2 = a[..., np.newaxis] + step2 = step[..., np.newaxis] + args2 = [arg[..., np.newaxis] for arg in args] + + # Find the location of a term that is less than the tolerance (if possible) + log2maxterms = np.floor(np.log2(maxterms)) if maxterms else 0 + n_steps = np.concatenate([2**np.arange(0, log2maxterms), [maxterms]], dtype=dtype) + nfev = len(n_steps) + ks = a2 + n_steps * step2 + fks = f(ks, *args2) + nt = np.minimum(np.sum(fks > tol[:, np.newaxis], axis=-1), n_steps.shape[-1]-1) + n_steps = n_steps[nt] + + # Directly evaluate the sum up to this term + k = a + n_steps * step + left, left_error, left_nfev = _direct(f, a, k, step, args, + constants, inclusive=False) + i_skip |= np.isposinf(left) # if sum is not finite, no sense in continuing + status[np.isposinf(left)] = -3 + k[i_skip] = np.nan + + # Use integration to estimate the remaining sum + # Possible optimization for future work: if there were no terms less than + # the tolerance, there is no need to compute the integral to better accuracy. + # Something like: + # atol = np.maximum(atol, np.minimum(fk/2 - fb/2)) + # rtol = np.maximum(rtol, np.minimum((fk/2 - fb/2)/left)) + # where `fk`/`fb` are currently calculated below. 
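+    # (Editor's note: `right` below estimates the remaining tail by the
+    # integral test; e.g. for f(x) = 1/x**2 the tail beyond k is roughly
+    # the integral from k to infinity, 1/k, so a large k found above yields
+    # a correspondingly small remainder estimate.)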
+ right = _tanhsinh(f, k, b, args=args, atol=atol, rtol=rtol, log=log) + + # Calculate the full estimate and error from the pieces + fk = fks[np.arange(len(fks)), nt] + fb = f(b, *args) + nfev += 1 + if log: + log_step = np.log(step) + S_terms = (left, right.integral - log_step, fk - log2, fb - log2) + S = _logsumexp(S_terms, axis=0) + E_terms = (left_error, right.error - log_step, fk-log2, fb-log2+np.pi*1j) + E = _logsumexp(E_terms, axis=0).real + else: + S = left + right.integral/step + fk/2 + fb/2 + E = left_error + right.error/step + fk/2 - fb/2 + status[~i_skip] = right.status[~i_skip] + return S, E, status, left_nfev + right.nfev + nfev + lb.nfev diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fbe799fa8bfe4c5f1b2d2ed5edc07fe91db628ef Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/dop.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/dop.py new file mode 100644 index 0000000000000000000000000000000000000000..bf67a9a35b7d2959c2617aadc5638b577a45b9b5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/dop.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="dop", + private_modules=["_dop"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/lsoda.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/lsoda.py new file mode 100644 index 0000000000000000000000000000000000000000..1bc1f1da3c4f0aefad9da73b6405b957ce9335b4 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/lsoda.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['lsoda'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="lsoda", + private_modules=["_lsoda"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/odepack.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/odepack.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb4c1a8c9be375df855abe6e1b30ca9711f2607 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/odepack.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+# Use the `scipy.integrate` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['odeint', 'ODEintWarning'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="odepack", + private_modules=["_odepack_py"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/quadpack.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/quadpack.py new file mode 100644 index 0000000000000000000000000000000000000000..144584988095c8855da8c34253c045f1a3940572 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/quadpack.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.integrate` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + "quad", + "dblquad", + "tplquad", + "nquad", + "IntegrationWarning", +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="quadpack", + private_modules=["_quadpack_py"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b803826b452161f0c85006dc020f3c26dd047937 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbb6ad29aec81d186b9945056f842536860d4a0a Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3752633bc030ccd3ea4be11b2d48ee68015f6a9 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc differ diff --git 
a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a622ba234232576260d7fcb73c3413a07602d33 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f12c4ce8e99e78407324321d9a8f5cf400a9e808 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adf996ed9e886471e7b8e53ec8bd00b668e321ff Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99b91815e22a5fcc8e252b4828bb09d7b97a2a0d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..591e9b6985b3c7da8e905a6e9f9e7f392c32ccd3 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb6c999b201c1565128d69c351d9b9c1ac5a4a6d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc differ diff --git 
a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py new file mode 100644 index 0000000000000000000000000000000000000000..c88650ca1010b3543f4577dbb6d24cdbba36f18e --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py @@ -0,0 +1,215 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose + +from scipy.integrate import quad_vec + +from multiprocessing.dummy import Pool + + +quadrature_params = pytest.mark.parametrize( + 'quadrature', [None, "gk15", "gk21", "trapezoid"]) + + +@quadrature_params +def test_quad_vec_simple(quadrature): + n = np.arange(10) + def f(x): + return x ** n + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(epsabs=epsabs, quadrature=quadrature) + + exact = 2**(n+1)/(n + 1) + + res, err = quad_vec(f, 0, 2, norm='max', **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err = quad_vec(f, 0, 2, norm='2', **kwargs) + assert np.linalg.norm(res - exact) < epsabs + + res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err, *rest = quad_vec(f, 0, 2, norm='max', + epsrel=1e-8, + full_output=True, + limit=10000, + **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + +@quadrature_params +def test_quad_vec_simple_inf(quadrature): + def f(x): + return 1 / (1 + np.float64(x) ** 2) + + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature) + + res, err = quad_vec(f, 0, np.inf, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, -np.inf, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, 0, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, 0, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, np.inf, **kwargs) + assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, -np.inf, **kwargs) + assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, -np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + def f(x): + return np.sin(x + 2) / (1 + x ** 2) + exact = np.pi / np.e * np.sin(2) + epsabs = 1e-5 + + res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs, + quadrature=quadrature, full_output=True) + assert info.status == 1 + assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err)) + + +def test_quad_vec_args(): + def f(x, a): + return x * (x + a) * np.arange(3) + a = 2 + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=(a,)) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +def _lorenzian(x): + return 1 / (1 + x**2) + + +@pytest.mark.fail_slow(5) +def test_quad_vec_pool(): + f 
= _lorenzian + res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + with Pool(10) as pool: + def f(x): + return 1 / (1 + x ** 2) + res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + +def _func_with_args(x, a): + return x * (x + a) * np.arange(3) + + +@pytest.mark.fail_slow(5) +@pytest.mark.parametrize('extra_args', [2, (2,)]) +@pytest.mark.parametrize('workers', [1, 10]) +def test_quad_vec_pool_args(extra_args, workers): + f = _func_with_args + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + with Pool(workers) as pool: + res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +@quadrature_params +def test_num_eval(quadrature): + def f(x): + count[0] += 1 + return x**5 + + count = [0] + res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature) + assert res[2].neval == count[0] + + +def test_info(): + def f(x): + return np.ones((3, 2, 1)) + + res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True) + + assert info.success is True + assert info.status == 0 + assert info.message == 'Target precision reached.' + assert info.neval > 0 + assert info.intervals.shape[1] == 2 + assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1) + assert info.errors.shape == (info.intervals.shape[0],) + + +def test_nan_inf(): + def f_nan(x): + return np.nan + + def f_inf(x): + return np.inf if x < 0.1 else 1/x + + res, err, info = quad_vec(f_nan, 0, 1, full_output=True) + assert info.status == 3 + + res, err, info = quad_vec(f_inf, 0, 1, full_output=True) + assert info.status == 3 + + +@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0), + (-np.inf, np.inf), (np.inf, -np.inf)]) +def test_points(a, b): + # Check that initial interval splitting is done according to + # `points`, by checking that consecutive sets of 15 point (for + # gk15) function evaluations lie between `points` + + points = (0, 0.25, 0.5, 0.75, 1.0) + points += tuple(-x for x in points) + + quadrature_points = 15 + interval_sets = [] + count = 0 + + def f(x): + nonlocal count + + if count % quadrature_points == 0: + interval_sets.append(set()) + + count += 1 + interval_sets[-1].add(float(x)) + return 0.0 + + quad_vec(f, a, b, points=points, quadrature='gk15', limit=0) + + # Check that all point sets lie in a single `points` interval + for p in interval_sets: + j = np.searchsorted(sorted(points), tuple(p)) + assert np.all(j == j[0]) + +def test_trapz_deprecation(): + with pytest.deprecated_call(match="`quadrature='trapz'`"): + quad_vec(lambda x: x, 0, 1, quadrature="trapz") diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py new file mode 100644 index 0000000000000000000000000000000000000000..f34d45d94fd754bc8d2c90609ac308f6d3e4706b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py @@ -0,0 +1,218 @@ +import itertools +import numpy as np +from numpy.testing import assert_allclose +from scipy.integrate import ode + + +def _band_count(a): + """Returns ml and mu, the lower and 
upper band sizes of a.""" + nrows, ncols = a.shape + ml = 0 + for k in range(-nrows+1, 0): + if np.diag(a, k).any(): + ml = -k + break + mu = 0 + for k in range(nrows-1, 0, -1): + if np.diag(a, k).any(): + mu = k + break + return ml, mu + + +def _linear_func(t, y, a): + """Linear system dy/dt = a * y""" + return a.dot(y) + + +def _linear_jac(t, y, a): + """Jacobian of a * y is a.""" + return a + + +def _linear_banded_jac(t, y, a): + """Banded Jacobian.""" + ml, mu = _band_count(a) + bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] + bjac.append(np.diag(a)) + for k in range(-1, -ml-1, -1): + bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) + return bjac + + +def _solve_linear_sys(a, y0, tend=1, dt=0.1, + solver=None, method='bdf', use_jac=True, + with_jacobian=False, banded=False): + """Use scipy.integrate.ode to solve a linear system of ODEs. + + a : square ndarray + Matrix of the linear system to be solved. + y0 : ndarray + Initial condition + tend : float + Stop time. + dt : float + Step size of the output. + solver : str + If not None, this must be "vode", "lsoda" or "zvode". + method : str + Either "bdf" or "adams". + use_jac : bool + Determines if the jacobian function is passed to ode(). + with_jacobian : bool + Passed to ode.set_integrator(). + banded : bool + Determines whether a banded or full jacobian is used. + If `banded` is True, `lband` and `uband` are determined by the + values in `a`. + """ + if banded: + lband, uband = _band_count(a) + else: + lband = None + uband = None + + if use_jac: + if banded: + r = ode(_linear_func, _linear_banded_jac) + else: + r = ode(_linear_func, _linear_jac) + else: + r = ode(_linear_func) + + if solver is None: + if np.iscomplexobj(a): + solver = "zvode" + else: + solver = "vode" + + r.set_integrator(solver, + with_jacobian=with_jacobian, + method=method, + lband=lband, uband=uband, + rtol=1e-9, atol=1e-10, + ) + t0 = 0 + r.set_initial_value(y0, t0) + r.set_f_params(a) + r.set_jac_params(a) + + t = [t0] + y = [y0] + while r.successful() and r.t < tend: + r.integrate(r.t + dt) + t.append(r.t) + y.append(r.y) + + t = np.array(t) + y = np.array(y) + return t, y + + +def _analytical_solution(a, y0, t): + """ + Analytical solution to the linear differential equations dy/dt = a*y. + + The solution is only valid if `a` is diagonalizable. + + Returns a 2-D array with shape (len(t), len(y0)). + """ + lam, v = np.linalg.eig(a) + c = np.linalg.solve(v, y0) + e = c * np.exp(lam * t.reshape(-1, 1)) + sol = e.dot(v.T) + return sol + + +def test_banded_ode_solvers(): + # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class + # with a system that has a banded Jacobian matrix. 
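+    # (Editor's note: the matrices below are constructed so that their
+    # nonzero diagonals exercise several (lband, uband) combinations in
+    # _band_count: (2, 1), (0, 1), (2, 0) and (0, 0).)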
+ + t_exact = np.linspace(0, 1.0, 5) + + # --- Real arrays for testing the "lsoda" and "vode" solvers --- + + # lband = 2, uband = 1: + a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], + [0.2, -0.5, 0.9, 0.0, 0.0], + [0.1, 0.1, -0.4, 0.1, 0.0], + [0.0, 0.3, -0.1, -0.9, -0.3], + [0.0, 0.0, 0.1, 0.1, -0.7]]) + + # lband = 0, uband = 1: + a_real_upper = np.triu(a_real) + + # lband = 2, uband = 0: + a_real_lower = np.tril(a_real) + + # lband = 0, uband = 0: + a_real_diag = np.triu(a_real_lower) + + real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] + real_solutions = [] + + for a in real_matrices: + y0 = np.arange(1, a.shape[0] + 1) + y_exact = _analytical_solution(a, y0, t_exact) + real_solutions.append((y0, t_exact, y_exact)) + + def check_real(idx, solver, meth, use_jac, with_jac, banded): + a = real_matrices[idx] + y0, t_exact, y_exact = real_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(real_matrices)): + p = [['vode', 'lsoda'], # solver + ['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for solver, meth, use_jac, with_jac, banded in itertools.product(*p): + check_real(idx, solver, meth, use_jac, with_jac, banded) + + # --- Complex arrays for testing the "zvode" solver --- + + # complex, lband = 2, uband = 1: + a_complex = a_real - 0.5j * a_real + + # complex, lband = 0, uband = 0: + a_complex_diag = np.diag(np.diag(a_complex)) + + complex_matrices = [a_complex, a_complex_diag] + complex_solutions = [] + + for a in complex_matrices: + y0 = np.arange(1, a.shape[0] + 1) + 1j + y_exact = _analytical_solution(a, y0, t_exact) + complex_solutions.append((y0, t_exact, y_exact)) + + def check_complex(idx, solver, meth, use_jac, with_jac, banded): + a = complex_matrices[idx] + y0, t_exact, y_exact = complex_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(complex_matrices)): + p = [['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for meth, use_jac, with_jac, banded in itertools.product(*p): + check_complex(idx, "zvode", meth, use_jac, with_jac, banded) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py new file mode 100644 index 0000000000000000000000000000000000000000..edaf80bec586831d255c6df48e2b953f40a563fa --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py @@ -0,0 +1,711 @@ +import sys + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import numpy as np +from numpy.testing import (assert_, assert_array_equal, assert_allclose, + assert_equal) +from pytest import raises as assert_raises + +from scipy.sparse import coo_matrix +from scipy.special import erf +from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac, + estimate_bc_jac, compute_jac_indices, + construct_global_jac, solve_bvp) + + 
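+# (Editor's sketch, not part of the upstream suite: the tests below share
+# one solve_bvp calling convention, roughly
+#
+#     x = np.linspace(0, 1, 5)            # initial mesh
+#     y = np.zeros((2, x.size))           # initial guess, shape (n, m)
+#     sol = solve_bvp(exp_fun, exp_bc, x, y)
+#     assert sol.success
+#
+# where fun(x, y) returns dy/dx with the same shape as y and bc(ya, yb)
+# returns the boundary-condition residuals.)
+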
+def exp_fun(x, y): + return np.vstack((y[1], y[0])) + + +def exp_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = 1 + df_dy[1, 1] = 0 + return df_dy + + +def exp_bc(ya, yb): + return np.hstack((ya[0] - 1, yb[0])) + + +def exp_bc_complex(ya, yb): + return np.hstack((ya[0] - 1 - 1j, yb[0])) + + +def exp_bc_jac(ya, yb): + dbc_dya = np.array([ + [1, 0], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def exp_sol(x): + return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2)) + + +def sl_fun(x, y, p): + return np.vstack((y[1], -p[0]**2 * y[0])) + + +def sl_fun_jac(x, y, p): + n, m = y.shape + df_dy = np.empty((n, 2, m)) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -p[0]**2 + df_dy[1, 1] = 0 + + df_dp = np.empty((n, 1, m)) + df_dp[0, 0] = 0 + df_dp[1, 0] = -2 * p[0] * y[0] + + return df_dy, df_dp + + +def sl_bc(ya, yb, p): + return np.hstack((ya[0], yb[0], ya[1] - p[0])) + + +def sl_bc_jac(ya, yb, p): + dbc_dya = np.zeros((3, 2)) + dbc_dya[0, 0] = 1 + dbc_dya[2, 1] = 1 + + dbc_dyb = np.zeros((3, 2)) + dbc_dyb[1, 0] = 1 + + dbc_dp = np.zeros((3, 1)) + dbc_dp[2, 0] = -1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def sl_sol(x, p): + return np.sin(p[0] * x) + + +def emden_fun(x, y): + return np.vstack((y[1], -y[0]**5)) + + +def emden_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -5 * y[0]**4 + df_dy[1, 1] = 0 + return df_dy + + +def emden_bc(ya, yb): + return np.array([ya[1], yb[0] - (3/4)**0.5]) + + +def emden_bc_jac(ya, yb): + dbc_dya = np.array([ + [0, 1], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def emden_sol(x): + return (1 + x**2/3)**-0.5 + + +def undefined_fun(x, y): + return np.zeros_like(y) + + +def undefined_bc(ya, yb): + return np.array([ya[0], yb[0] - 1]) + + +def big_fun(x, y): + f = np.zeros_like(y) + f[::2] = y[1::2] + return f + + +def big_bc(ya, yb): + return np.hstack((ya[::2], yb[::2] - 1)) + + +def big_sol(x, n): + y = np.ones((2 * n, x.size)) + y[::2] = x + return x + + +def big_fun_with_parameters(x, y, p): + """ Big version of sl_fun, with two parameters. + + The two differential equations represented by sl_fun are broadcast to the + number of rows of y, rotating between the parameters p[0] and p[1]. + Here are the differential equations: + + dy[0]/dt = y[1] + dy[1]/dt = -p[0]**2 * y[0] + dy[2]/dt = y[3] + dy[3]/dt = -p[1]**2 * y[2] + dy[4]/dt = y[5] + dy[5]/dt = -p[0]**2 * y[4] + dy[6]/dt = y[7] + dy[7]/dt = -p[1]**2 * y[6] + . + . + . 
+ + """ + f = np.zeros_like(y) + f[::2] = y[1::2] + f[1::4] = -p[0]**2 * y[::4] + f[3::4] = -p[1]**2 * y[2::4] + return f + + +def big_fun_with_parameters_jac(x, y, p): + # big version of sl_fun_jac, with two parameters + n, m = y.shape + df_dy = np.zeros((n, n, m)) + df_dy[range(0, n, 2), range(1, n, 2)] = 1 + df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2 + df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2 + + df_dp = np.zeros((n, 2, m)) + df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)] + df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)] + + return df_dy, df_dp + + +def big_bc_with_parameters(ya, yb, p): + # big version of sl_bc, with two parameters + return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1])) + + +def big_bc_with_parameters_jac(ya, yb, p): + # big version of sl_bc_jac, with two parameters + n = ya.shape[0] + dbc_dya = np.zeros((n + 2, n)) + dbc_dyb = np.zeros((n + 2, n)) + + dbc_dya[range(n // 2), range(0, n, 2)] = 1 + dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1 + + dbc_dp = np.zeros((n + 2, 2)) + dbc_dp[n, 0] = -1 + dbc_dya[n, 1] = 1 + dbc_dp[n + 1, 1] = -1 + dbc_dya[n + 1, 3] = 1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def big_sol_with_parameters(x, p): + # big version of sl_sol, with two parameters + return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x))) + + +def shock_fun(x, y): + eps = 1e-3 + return np.vstack(( + y[1], + -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) + + np.pi * x * np.sin(np.pi * x)) / eps + )) + + +def shock_bc(ya, yb): + return np.array([ya[0] + 2, yb[0]]) + + +def shock_sol(x): + eps = 1e-3 + k = np.sqrt(2 * eps) + return np.cos(np.pi * x) + erf(x / k) / erf(1 / k) + + +def nonlin_bc_fun(x, y): + # laplace eq. + return np.stack([y[1], np.zeros_like(x)]) + + +def nonlin_bc_bc(ya, yb): + phiA, phipA = ya + phiC, phipC = yb + + kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9 + + # Butler-Volmer Kinetics at Anode + hA = 0.0-phiA-0.0 + iA = ioA * (np.exp(f*hA) - np.exp(-f*hA)) + res0 = iA + kappa * phipA + + # Butler-Volmer Kinetics at Cathode + hC = V - phiC - 1.0 + iC = ioC * (np.exp(f*hC) - np.exp(-f*hC)) + res1 = iC - kappa*phipC + + return np.array([res0, res1]) + + +def nonlin_bc_sol(x): + return -0.13426436116763119 - 1.1308709 * x + + +def test_modify_mesh(): + x = np.array([0, 1, 3, 9], dtype=float) + x_new = modify_mesh(x, np.array([0]), np.array([2])) + assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9])) + + x = np.array([-6, -3, 0, 3, 6], dtype=float) + x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3])) + assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6]) + + +def test_compute_fun_jac(): + x = np.linspace(0, 1, 5) + y = np.empty((2, x.shape[0])) + y[0] = 0.01 + y[1] = 0.02 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p) + df_dy_an = exp_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + x = np.linspace(0, np.pi, 5) + y = np.empty((2, x.shape[0])) + y[0] = np.sin(x) + y[1] = np.cos(x) + p = np.array([1.0]) + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_an, df_dp_an = sl_fun_jac(x, y, p) + assert_allclose(df_dy, df_dy_an) + assert_allclose(df_dp, df_dp_an) + + x = np.linspace(0, 1, 10) + y = np.empty((2, x.shape[0])) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p) + df_dy_an = emden_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + +def test_compute_bc_jac(): + ya = 
np.array([-1.0, 2]) + yb = np.array([0.5, 3]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + ya = np.array([0.0, 1]) + yb = np.array([0.0, -1]) + p = np.array([0.5]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p) + dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_allclose(dbc_dp, dbc_dp_an) + + ya = np.array([0.5, 100]) + yb = np.array([-1000, 10.5]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + +def test_compute_jac_indices(): + n = 2 + m = 4 + k = 2 + i, j = compute_jac_indices(n, m, k) + s = coo_matrix((np.ones_like(i), (i, j))).toarray() + s_true = np.array([ + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + ]) + assert_array_equal(s, s_true) + + +def test_compute_global_jac(): + n = 2 + m = 5 + k = 1 + i_jac, j_jac = compute_jac_indices(2, 5, 1) + x = np.linspace(0, 1, 5) + h = np.diff(x) + y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x))) + p = np.array([3.0]) + + f = sl_fun(x, y, p) + + x_middle = x[:-1] + 0.5 * h + y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1]) + + df_dy, df_dp = sl_fun_jac(x, y, p) + df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p) + + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + + def J_block(h, p): + return np.array([ + [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h], + [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12] + ]) + + J_true = np.zeros((m * n + k, m * n + k)) + for i in range(m - 1): + J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0]) + + J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:]) + J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) + + h**2/6 * (y[1, :-1] - y[1, 1:])) + + J_true[8, 0] = 1 + J_true[9, 8] = 1 + J_true[10, 1] = 1 + J_true[10, 10] = -1 + + assert_allclose(J, J_true, rtol=1e-10) + + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p) + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + assert_allclose(J, J_true, rtol=2e-8, atol=2e-8) + + +def test_parameter_validation(): + x = [0, 1, 0.5] + y = np.zeros((2, 3)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, 4)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + def fun(x, y, p): + return exp_fun(x, y) + def bc(ya, yb, p): + return exp_bc(ya, yb) + + y = np.zeros((2, 
x.shape[0])) + assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1]) + + def wrong_shape_fun(x, y): + return np.zeros(3) + + assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y) + + S = np.array([[0, 0]]) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S) + + +def test_no_params(): + x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0])) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 5) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res**2, axis=0)**0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_with_params(): + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2, x.shape[0])) + + for fun_jac in [None, sl_fun_jac]: + for bc_jac in [None, sl_bc_jac]: + sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 10) + + assert_allclose(sol.p, [1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], sl_sol(x_test, [1]), + rtol=1e-4, atol=1e-4) + + f_test = sl_fun(x_test, sol_test, [1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_singular_term(): + x = np.linspace(0, 1, 10) + x_test = np.linspace(0.05, 1, 100) + y = np.empty((2, 10)) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + S = np.array([[0, 0], [0, -2]]) + + for fun_jac in [None, emden_fun_jac]: + for bc_jac in [None, emden_bc_jac]: + sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 10) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5) + + f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_complex(): + # The test is essentially the same as test_no_params, but boundary + # conditions are turned into complex. 
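+    # (Editor's note: exp_bc_complex pins ya[0] to 1 + 1j; because the ODE
+    # is linear with real coefficients, the real and imaginary parts of the
+    # solution each satisfy the real problem, which is why both are compared
+    # against exp_sol below.)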
+ x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0]), dtype=complex) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5) + assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), + axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_failures(): + x = np.linspace(0, 1, 2) + y = np.zeros((2, x.size)) + res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5) + assert_equal(res.status, 1) + assert_(not res.success) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.size)) + res = solve_bvp(undefined_fun, undefined_bc, x, y) + assert_equal(res.status, 2) + assert_(not res.success) + + +def test_big_problem(): + n = 30 + x = np.linspace(0, 1, 5) + y = np.zeros((2 * n, x.size)) + sol = solve_bvp(big_fun, big_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x) + + assert_allclose(sol_test[0], big_sol(x, n)) + + f_test = big_fun(x, sol_test) + r = sol.sol(x, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_big_problem_with_parameters(): + n = 30 + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2 * n, x.size)) + + for fun_jac in [None, big_fun_with_parameters_jac]: + for bc_jac in [None, big_bc_with_parameters_jac]: + sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x, + y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_allclose(sol.p, [1, 1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + for isol in range(0, n, 4): + assert_allclose(sol_test[isol], + big_sol_with_parameters(x_test, [1, 1])[0], + rtol=1e-4, atol=1e-4) + assert_allclose(sol_test[isol + 2], + big_sol_with_parameters(x_test, [1, 1])[1], + rtol=1e-4, atol=1e-4) + + f_test = big_fun_with_parameters(x_test, sol_test, [1, 1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_shock_layer(): + x = np.linspace(-1, 1, 5) + x_test = np.linspace(-1, 1, 100) + y = np.zeros((2, x.size)) + sol = solve_bvp(shock_fun, shock_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 110) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = shock_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res 
** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_nonlin_bc(): + x = np.linspace(0, 0.1, 5) + x_test = x + y = np.zeros([2, x.size]) + sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 8) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = nonlin_bc_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_verbose(): + # Smoke test that checks the printing does something and does not crash + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.shape[0])) + for verbose in [0, 1, 2]: + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose) + text = sys.stdout.getvalue() + finally: + sys.stdout = old_stdout + + assert_(sol.success) + if verbose == 0: + assert_(not text, text) + if verbose >= 1: + assert_("Solved in" in text, text) + if verbose >= 2: + assert_("Max residual" in text, text) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py new file mode 100644 index 0000000000000000000000000000000000000000..ff228ed1719641b5b7013defef5e74dbfd0e07e5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py @@ -0,0 +1,834 @@ +# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers +""" +Tests for numerical integration. +""" +import numpy as np +from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, + allclose) + +from numpy.testing import ( + assert_, assert_array_almost_equal, + assert_allclose, assert_array_equal, assert_equal, assert_warns) +from pytest import raises as assert_raises +from scipy.integrate import odeint, ode, complex_ode + +#------------------------------------------------------------------------------ +# Test ODE integrators +#------------------------------------------------------------------------------ + + +class TestOdeint: + # Check integrate.odeint + + def _do_problem(self, problem): + t = arange(0.0, problem.stop_t, 0.05) + + # Basic case + z, infodict = odeint(problem.f, problem.z0, t, full_output=True) + assert_(problem.verify(z, t)) + + # Use tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + if hasattr(problem, 'jac'): + # Use Dfun + z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac, + full_output=True) + assert_(problem.verify(z, t)) + + # Use Dfun and tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + Dfun=lambda t, y: problem.jac(y, t), + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + def test_odeint(self): + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem) + + +class TestODEClass: + + ode_class = None # Set in subclass. 
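+
+    # (Editor's note: concrete suites below, e.g. TestOde and
+    # TestComplexOde, only set ode_class to `ode` or `complex_ode` and
+    # inherit _do_problem unchanged.)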
+ + def _do_problem(self, problem, integrator, method='adams'): + + # ode has callback arguments in different order than odeint + def f(t, z): + return problem.f(z, t) + jac = None + if hasattr(problem, 'jac'): + def jac(t, z): + return problem.jac(z, t) + + integrator_params = {} + if problem.lband is not None or problem.uband is not None: + integrator_params['uband'] = problem.uband + integrator_params['lband'] = problem.lband + + ig = self.ode_class(f, jac) + ig.set_integrator(integrator, + atol=problem.atol/10, + rtol=problem.rtol/10, + method=method, + **integrator_params) + + ig.set_initial_value(problem.z0, t=0.0) + z = ig.integrate(problem.stop_t) + + assert_array_equal(z, ig.y) + assert_(ig.successful(), (problem, method)) + assert_(ig.get_return_code() > 0, (problem, method)) + assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) + + +class TestOde(TestODEClass): + + ode_class = ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + self._do_problem(problem, 'vode', 'bdf') + + def test_zvode(self): + # Check the zvode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'zvode', 'adams') + self._do_problem(problem, 'zvode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + def test_concurrent_fail(self): + for sol in ('vode', 'zvode', 'lsoda'): + def f(t, y): + return 1.0 + + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_raises(RuntimeError, r.integrate, r.t + 0.1) + + def test_concurrent_ok(self): + def f(t, y): + return 1.0 + + for k in range(3): + for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.1) + assert_allclose(r2.y, 0.2) + + for sol in ('dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.3) + assert_allclose(r2.y, 0.2) + + +class TestComplexOde(TestODEClass): + + ode_class = complex_ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + else: + self._do_problem(problem, 'vode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for 
problem_cls in PROBLEMS: + problem = problem_cls() + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + +class TestSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_after_initial_test(self, integrator): + # Check if solout works even if it is set after the initial value. + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_initial_value(y0, t0) + ig.set_solout(solout) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout_after_initial(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_after_initial_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +class TestComplexSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + 
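+        # (Editor's note: returning -1 from a solout callback asks the
+        # dopri5/dop853 integrators to stop early; the ts[-1] < tend
+        # assertion below relies on this.)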
+ def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +#------------------------------------------------------------------------------ +# Test problems +#------------------------------------------------------------------------------ + + +class ODE: + """ + ODE problem + """ + stiff = False + cmplx = False + stop_t = 1 + z0 = [] + + lband = None + uband = None + + atol = 1e-6 + rtol = 1e-5 + + +class SimpleOscillator(ODE): + r""" + Free vibration of a simple oscillator:: + m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 + Solution:: + u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) + """ + stop_t = 1 + 0.09 + z0 = array([1.0, 0.1], float) + + k = 4.0 + m = 1.0 + + def f(self, z, t): + tmp = zeros((2, 2), float) + tmp[0, 1] = 1.0 + tmp[1, 0] = -self.k / self.m + return dot(tmp, z) + + def verify(self, zs, t): + omega = sqrt(self.k / self.m) + u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega + return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol) + + +class ComplexExp(ODE): + r"""The equation :lm:`\dot u = i u`""" + stop_t = 1.23*pi + z0 = exp([1j, 2j, 3j, 4j, 5j]) + cmplx = True + + def f(self, z, t): + return 1j*z + + def jac(self, z, t): + return 1j*eye(5) + + def verify(self, zs, t): + u = self.z0 * exp(1j*t) + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +class Pi(ODE): + r"""Integrate 1/(t + 1j) from t=-10 to t=10""" + stop_t = 20 + z0 = [0] + cmplx = True + + def f(self, z, t): + return array([1./(t - 10 + 1j)]) + + def verify(self, zs, t): + u = -2j * np.arctan(10) + return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol) + + +class CoupledDecay(ODE): + r""" + 3 coupled decays suited for banded treatment + (banded mode makes it necessary when N>>3) + """ + + stiff = True + stop_t = 0.5 + z0 = [5.0, 7.0, 13.0] + lband = 1 + uband = 0 + + lmbd = [0.17, 0.23, 0.29] # fictitious decay constants + + def f(self, z, t): + lmbd = self.lmbd + return np.array([-lmbd[0]*z[0], + -lmbd[1]*z[1] + lmbd[0]*z[0], + -lmbd[2]*z[2] + lmbd[1]*z[1]]) + + def jac(self, z, t): + # The full Jacobian is + # + # [-lmbd[0] 0 0 ] + # [ lmbd[0] -lmbd[1] 0 ] + # [ 0 lmbd[1] -lmbd[2]] + # + # The lower and upper bandwidths are lband=1 and uband=0, resp. 
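+        #
+        # (Editor's note: in the packed banded format used by VODE/LSODA,
+        # full-matrix element (ri, ci) is stored at packed row
+        # uband + ri - ci, column ci, which is exactly what set_j
+        # below implements.)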
+ # The representation of this array in packed format is + # + # [-lmbd[0] -lmbd[1] -lmbd[2]] + # [ lmbd[0] lmbd[1] 0 ] + + lmbd = self.lmbd + j = np.zeros((self.lband + self.uband + 1, 3), order='F') + + def set_j(ri, ci, val): + j[self.uband + ri - ci, ci] = val + set_j(0, 0, -lmbd[0]) + set_j(1, 0, lmbd[0]) + set_j(1, 1, -lmbd[1]) + set_j(2, 1, lmbd[1]) + set_j(2, 2, -lmbd[2]) + return j + + def verify(self, zs, t): + # Formulae derived by hand + lmbd = np.array(self.lmbd) + d10 = lmbd[1] - lmbd[0] + d21 = lmbd[2] - lmbd[1] + d20 = lmbd[2] - lmbd[0] + e0 = np.exp(-lmbd[0] * t) + e1 = np.exp(-lmbd[1] * t) + e2 = np.exp(-lmbd[2] * t) + u = np.vstack(( + self.z0[0] * e0, + self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1), + self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) + + lmbd[1] * lmbd[0] * self.z0[0] / d10 * + (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose() + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay] + +#------------------------------------------------------------------------------ + + +def f(t, x): + dxdt = [x[1], -x[0]] + return dxdt + + +def jac(t, x): + j = array([[0.0, 1.0], + [-1.0, 0.0]]) + return j + + +def f1(t, x, omega): + dxdt = [omega*x[1], -omega*x[0]] + return dxdt + + +def jac1(t, x, omega): + j = array([[0.0, omega], + [-omega, 0.0]]) + return j + + +def f2(t, x, omega1, omega2): + dxdt = [omega1*x[1], -omega2*x[0]] + return dxdt + + +def jac2(t, x, omega1, omega2): + j = array([[0.0, omega1], + [-omega2, 0.0]]) + return j + + +def fv(t, x, omega): + dxdt = [omega[0]*x[1], -omega[1]*x[0]] + return dxdt + + +def jacv(t, x, omega): + j = array([[0.0, omega[0]], + [-omega[1], 0.0]]) + return j + + +class ODECheckParameterUse: + """Call an ode-class solver with several cases of parameter use.""" + + # solver_name must be set before tests can be run with this class. + + # Set these in subclasses. + solver_name = '' + solver_uses_jac = False + + def _get_solver(self, f, jac): + solver = ode(f, jac) + if self.solver_uses_jac: + solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, + with_jacobian=self.solver_uses_jac) + else: + # XXX Shouldn't set_integrator *always* accept the keyword arg + # 'with_jacobian', and perhaps raise an exception if it is set + # to True if the solver can't actually use it? 
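+ # Until that is settled, with_jacobian is only passed to the solvers that use a Jacobian.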
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) + return solver + + def _check_solver(self, solver): + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + solver.integrate(pi) + assert_array_almost_equal(solver.y, [-1.0, 0.0]) + + def test_no_params(self): + solver = self._get_solver(f, jac) + self._check_solver(solver) + + def test_one_scalar_param(self): + solver = self._get_solver(f1, jac1) + omega = 1.0 + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_two_scalar_params(self): + solver = self._get_solver(f2, jac2) + omega1 = 1.0 + omega2 = 1.0 + solver.set_f_params(omega1, omega2) + if self.solver_uses_jac: + solver.set_jac_params(omega1, omega2) + self._check_solver(solver) + + def test_vector_param(self): + solver = self._get_solver(fv, jacv) + omega = [1.0, 1.0] + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_warns_on_failure(self): + # Set nsteps small to ensure failure + solver = self._get_solver(f, jac) + solver.set_integrator(self.solver_name, nsteps=1) + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + assert_warns(UserWarning, solver.integrate, pi) + + +class TestDOPRI5CheckParameterUse(ODECheckParameterUse): + solver_name = 'dopri5' + solver_uses_jac = False + + +class TestDOP853CheckParameterUse(ODECheckParameterUse): + solver_name = 'dop853' + solver_uses_jac = False + + +class TestVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'vode' + solver_uses_jac = True + + +class TestZVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'zvode' + solver_uses_jac = True + + +class TestLSODACheckParameterUse(ODECheckParameterUse): + solver_name = 'lsoda' + solver_uses_jac = True + + +def test_odeint_trivial_time(): + # Test that odeint succeeds when given a single time point + # and full_output=True. This is a regression test for gh-4282. + y0 = 1 + t = [0] + y, info = odeint(lambda y, t: -y, y0, t, full_output=True) + assert_array_equal(y, np.array([[y0]])) + + +def test_odeint_banded_jacobian(): + # Test the use of the `Dfun`, `ml` and `mu` options of odeint. + + def func(y, t, c): + return c.dot(y) + + def jac(y, t, c): + return c + + def jac_transpose(y, t, c): + return c.T.copy(order='C') + + def bjac_rows(y, t, c): + jac = np.vstack((np.r_[0, np.diag(c, 1)], + np.diag(c), + np.r_[np.diag(c, -1), 0], + np.r_[np.diag(c, -2), 0, 0])) + return jac + + def bjac_cols(y, t, c): + return bjac_rows(y, t, c).T.copy(order='C') + + c = array([[-205, 0.01, 0.00, 0.0], + [0.1, -2.50, 0.02, 0.0], + [1e-3, 0.01, -2.0, 0.01], + [0.00, 0.00, 0.1, -1.0]]) + + y0 = np.ones(4) + t = np.array([0, 5, 10, 100]) + + # Use the full Jacobian. + sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac) + + # Use the transposed full Jacobian, with col_deriv=True. + sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac_transpose, col_deriv=True) + + # Use the banded Jacobian. + sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_rows, ml=2, mu=1) + + # Use the transposed banded Jacobian, with col_deriv=True. 
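+ # (col_deriv=True tells odeint that Dfun returns derivatives down columns, i.e. the transpose of the row-wise layout used above.)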
+ sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_cols, ml=2, mu=1, col_deriv=True) + + assert_allclose(sol1, sol2, err_msg="sol1 != sol2") + assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3") + assert_allclose(sol3, sol4, err_msg="sol3 != sol4") + + # Verify that the number of jacobian evaluations was the same for the + # calls of odeint with a full jacobian and with a banded jacobian. This is + # a regression test--there was a bug in the handling of banded jacobians + # that resulted in an incorrect jacobian matrix being passed to the LSODA + # code. That would cause errors or excessive jacobian evaluations. + assert_array_equal(info1['nje'], info2['nje']) + assert_array_equal(info3['nje'], info4['nje']) + + # Test the use of tfirst + sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,), + full_output=True, atol=1e-13, rtol=1e-11, + mxstep=10000, + Dfun=lambda t, y, c: jac(y, t, c), tfirst=True) + # The code should execute the exact same sequence of floating point + # calculations, so these should be exactly equal. We'll be safe and use + # a small tolerance. + assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty") + + +def test_odeint_errors(): + def sys1d(x, t): + return -100*x + + def bad1(x, t): + return 1.0/0 + + def bad2(x, t): + return "foo" + + def bad_jac1(x, t): + return 1.0/0 + + def bad_jac2(x, t): + return [["foo"]] + + def sys2d(x, t): + return [-100*x[0], -0.1*x[1]] + + def sys2d_bad_jac(x, t): + return [[1.0/0, 0], [0, -0.1]] + + assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1]) + assert_raises(ValueError, odeint, bad2, 1.0, [0, 1]) + + assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1) + assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2) + + assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1], + Dfun=sys2d_bad_jac) + + +def test_odeint_bad_shapes(): + # Tests of some errors that can occur with odeint. + + def badrhs(x, t): + return [1, -1] + + def sys1(x, t): + return -100*x + + def badjac(x, t): + return [[0, 0, 0]] + + # y0 must be at most 1-d. + bad_y0 = [[0, 0], [0, 0]] + assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1]) + + # t must be at most 1-d. + bad_t = [[0, 1], [2, 3]] + assert_raises(ValueError, odeint, sys1, [10.0], bad_t) + + # y0 is 10, but badrhs(x, t) returns [1, -1]. + assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1]) + + # shape of array returned by badjac(x, t) is not correct. + assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac) + + +def test_repeated_t_values(): + """Regression test for gh-8217.""" + + def func(x, t): + return -0.25*x + + t = np.zeros(10) + sol = odeint(func, [1.], t) + assert_array_equal(sol, np.ones((len(t), 1))) + + tau = 4*np.log(2) + t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau] + sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12) + expected_sol = np.array([[1.0, 2.0]]*9 + + [[0.5, 1.0], + [0.25, 0.5], + [0.25, 0.5], + [0.125, 0.25]]) + assert_allclose(sol, expected_sol) + + # Edge case: empty t sequence. + sol = odeint(func, [1.], []) + assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1))) + + # t values are not monotonic. 
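+ # (Repeated t values, as above, are accepted; a t sequence that reverses direction is not.)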
+ assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0]) + assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3]) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py new file mode 100644 index 0000000000000000000000000000000000000000..7d28ccc93f4444f3f2e0b71da01c573d4f903dbc --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py @@ -0,0 +1,74 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.integrate import odeint +import scipy.integrate._test_odeint_banded as banded5x5 + + +def rhs(y, t): + dydt = np.zeros_like(y) + banded5x5.banded5x5(t, y, dydt) + return dydt + + +def jac(y, t): + n = len(y) + jac = np.zeros((n, n), order='F') + banded5x5.banded5x5_jac(t, y, 1, 1, jac) + return jac + + +def bjac(y, t): + n = len(y) + bjac = np.zeros((4, n), order='F') + banded5x5.banded5x5_bjac(t, y, 1, 1, bjac) + return bjac + + +JACTYPE_FULL = 1 +JACTYPE_BANDED = 4 + + +def check_odeint(jactype): + if jactype == JACTYPE_FULL: + ml = None + mu = None + jacobian = jac + elif jactype == JACTYPE_BANDED: + ml = 2 + mu = 1 + jacobian = bjac + else: + raise ValueError(f"invalid jactype: {jactype!r}") + + y0 = np.arange(1.0, 6.0) + # These tolerances must match the tolerances used in banded5x5.f. + rtol = 1e-11 + atol = 1e-13 + dt = 0.125 + nsteps = 64 + t = dt * np.arange(nsteps+1) + + sol, info = odeint(rhs, y0, t, + Dfun=jacobian, ml=ml, mu=mu, + atol=atol, rtol=rtol, full_output=True) + yfinal = sol[-1] + odeint_nst = info['nst'][-1] + odeint_nfe = info['nfe'][-1] + odeint_nje = info['nje'][-1] + + y1 = y0.copy() + # Pure Fortran solution. y1 is modified in-place. + nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype) + + # It is likely that yfinal and y1 are *exactly* the same, but + # we'll be cautious and use assert_allclose. 
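+ # The step and evaluation counters, on the other hand, are compared exactly: both runs drive the same LSODA core with identical tolerances.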
+ assert_allclose(yfinal, y1, rtol=1e-12) + assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje)) + + +def test_odeint_full_jac(): + check_odeint(JACTYPE_FULL) + + +def test_odeint_banded_jac(): + check_odeint(JACTYPE_BANDED) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py new file mode 100644 index 0000000000000000000000000000000000000000..a503cb54918b95c80d14ed5282c3c8d260a59c63 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py @@ -0,0 +1,680 @@ +import sys +import math +import numpy as np +from numpy import sqrt, cos, sin, arctan, exp, log, pi +from numpy.testing import (assert_, + assert_allclose, assert_array_less, assert_almost_equal) +import pytest + +from scipy.integrate import quad, dblquad, tplquad, nquad +from scipy.special import erf, erfc +from scipy._lib._ccallback import LowLevelCallable + +import ctypes +import ctypes.util +from scipy._lib._ccallback_c import sine_ctypes + +import scipy.integrate._test_multivariate as clib_test + + +def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8): + value, err = value_and_err + assert_allclose(value, tabled_value, atol=err, rtol=0) + if error_tolerance is not None: + assert_array_less(err, error_tolerance) + + +def get_clib_test_routine(name, restype, *argtypes): + ptr = getattr(clib_test, name) + return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes)) + + +class TestCtypesQuad: + def setup_method(self): + if sys.platform == 'win32': + files = ['api-ms-win-crt-math-l1-1-0.dll'] + elif sys.platform == 'darwin': + files = ['libm.dylib'] + else: + files = ['libm.so', 'libm.so.6'] + + for file in files: + try: + self.lib = ctypes.CDLL(file) + break + except OSError: + pass + else: + # This test doesn't work on some Linux platforms (Fedora for + # example) that put an ld script in libm.so - see gh-5370 + pytest.skip("Ctypes can't import libm.so") + + restype = ctypes.c_double + argtypes = (ctypes.c_double,) + for name in ['sin', 'cos', 'tan']: + func = getattr(self.lib, name) + func.restype = restype + func.argtypes = argtypes + + def test_typical(self): + assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0]) + assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0]) + assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0]) + + def test_ctypes_sine(self): + quad(LowLevelCallable(sine_ctypes), 0, 1) + + def test_ctypes_variants(self): + sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double, + ctypes.c_double, ctypes.c_void_p) + + sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double), + ctypes.c_void_p) + + sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double, + ctypes.c_double) + + sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double)) + + sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.c_double) + + all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4] + legacy_sigs = [sin_2, sin_4] + legacy_only_sigs = [sin_4] + + # LowLevelCallables work for new signatures + for j, func in enumerate(all_sigs): + callback = LowLevelCallable(func) + if func in legacy_only_sigs: + pytest.raises(ValueError, quad, callback, 0, pi) + else: + assert_allclose(quad(callback, 0, pi)[0], 2.0) + + # 
Plain ctypes items work only for legacy signatures + for j, func in enumerate(all_sigs): + if func in legacy_sigs: + assert_allclose(quad(func, 0, pi)[0], 2.0) + else: + pytest.raises(ValueError, quad, func, 0, pi) + + +class TestMultivariateCtypesQuad: + def setup_method(self): + restype = ctypes.c_double + argtypes = (ctypes.c_int, ctypes.c_double) + for name in ['_multivariate_typical', '_multivariate_indefinite', + '_multivariate_sin']: + func = get_clib_test_routine(name, restype, *argtypes) + setattr(self, name, func) + + def test_typical(self): + # 1) Typical function with two extra arguments: + assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)), + 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + assert_quad(quad(self._multivariate_indefinite, 0, np.inf), + 0.577215664901532860606512) + + def test_threadsafety(self): + # Ensure multivariate ctypes are threadsafe + def threadsafety(y): + return y + quad(self._multivariate_sin, 0, 1)[0] + assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602) + + +class TestQuad: + def test_typical(self): + # 1) Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + def myfunc(x): # Euler's constant integrand + return -exp(-x)*log(x) + assert_quad(quad(myfunc, 0, np.inf), 0.577215664901532860606512) + + def test_singular(self): + # 3) Singular points in region of integration. + def myfunc(x): + if 0 < x < 2.5: + return sin(x) + elif 2.5 <= x <= 5.0: + return exp(-x) + else: + return 0.0 + + assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]), + 1 - cos(2.5) + exp(-2.5) - exp(-5.0)) + + def test_sine_weighted_finite(self): + # 4) Sine weighted integral (finite limits) + def myfunc(x, a): + return exp(a*(x-1)) + + ome = 2.0**3.4 + assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome), + (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2)) + + def test_sine_weighted_infinite(self): + # 5) Sine weighted integral (infinite limits) + def myfunc(x, a): + return exp(-x*a) + + a = 4.0 + ome = 3.0 + assert_quad(quad(myfunc, 0, np.inf, args=a, weight='sin', wvar=ome), + ome/(a**2 + ome**2)) + + def test_cosine_weighted_infinite(self): + # 6) Cosine weighted integral (negative infinite limits) + def myfunc(x, a): + return exp(x*a) + + a = 2.5 + ome = 2.3 + assert_quad(quad(myfunc, -np.inf, 0, args=a, weight='cos', wvar=ome), + a/(a**2 + ome**2)) + + def test_algebraic_log_weight(self): + # 7) Algebraic-logarithmic weight.
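+ # (The 'alg' weight is w(x) = (x-a)**alpha * (b-x)**beta with (alpha, beta) given via wvar; (-0.5, -0.5) below is the Chebyshev weight 1/sqrt(1 - x**2) on [-1, 1].)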
+ def myfunc(x, a): + return 1/(1+x+2**(-a)) + + a = 1.5 + assert_quad(quad(myfunc, -1, 1, args=a, weight='alg', + wvar=(-0.5, -0.5)), + pi/sqrt((1+2**(-a))**2 - 1)) + + def test_cauchypv_weight(self): + # 8) Cauchy principal value weighting w(x) = 1/(x-c) + def myfunc(x, a): + return 2.0**(-a)/((x-1)**2+4.0**(-a)) + + a = 0.4 + tabledValue = ((2.0**(-0.4)*log(1.5) - + 2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) - + arctan(2.0**(a+2)) - + arctan(2.0**a)) / + (4.0**(-a) + 1)) + assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0), + tabledValue, error_tolerance=1.9e-8) + + def test_b_less_than_a(self): + def f(x, p, q): + return p * np.exp(-q*x) + + val_1, err_1 = quad(f, 0, np.inf, args=(2, 3)) + val_2, err_2 = quad(f, np.inf, 0, args=(2, 3)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_2(self): + def f(x, s): + return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s) + + val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,)) + val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_3(self): + def f(x): + return 1.0 + + val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0)) + val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_full_output(self): + def f(x): + return 1.0 + + res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True) + res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True) + err = max(res_1[1], res_2[1]) + assert_allclose(res_1[0], -res_2[0], atol=err) + + def test_double_integral(self): + # 9) Double Integral test + def simpfunc(y, x): # Note order of arguments. + return x+y + + a, b = 1.0, 2.0 + assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x), + 5/6.0 * (b**3.0-a**3.0)) + + def test_double_integral2(self): + def func(x0, x1, t0, t1): + return x0 + x1 + t0 + t1 + def g(x): + return x + def h(x): + return 2 * x + args = 1, 2 + assert_quad(dblquad(func, 1, 2, g, h, args=args), 35./6 + 9*.5) + + def test_double_integral3(self): + def func(x0, x1): + return x0 + x1 + 1 + 2 + assert_quad(dblquad(func, 1, 2, 1, 2), 6.) + + @pytest.mark.parametrize( + "x_lower, x_upper, y_lower, y_upper, expected", + [ + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-inf, 0] for all n. + (-np.inf, 0, -np.inf, 0, np.pi / 4), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-inf, -1] for each n (one at a time). + (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)), + (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-inf, -1] for all n. + (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-inf, 1] for each n (one at a time). + (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-inf, 1] for all n. + (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain Dx = [-inf, -1] and Dy = [-inf, 1].
+ (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain Dx = [-inf, 1] and Dy = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [0, inf] for all n. + (0, np.inf, 0, np.inf, np.pi / 4), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [1, inf] for each n (one at a time). + (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)), + (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [1, inf] for all n. + (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-1, inf] for each n (one at a time). + (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)), + (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-1, inf] for all n. + (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain Dx = [-1, inf] and Dy = [1, inf]. + (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain Dx = [1, inf] and Dy = [-1, inf]. + (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y) + # over domain D = [-inf, inf] for all n. + (-np.inf, np.inf, -np.inf, np.inf, np.pi) + ] + ) + def test_double_integral_improper( + self, x_lower, x_upper, y_lower, y_upper, expected + ): + # The Gaussian Integral. + def f(x, y): + return np.exp(-x ** 2 - y ** 2) + + assert_quad( + dblquad(f, x_lower, x_upper, y_lower, y_upper), + expected, + error_tolerance=3e-8 + ) + + def test_triple_integral(self): + # 10) Triple Integral test + def simpfunc(z, y, x, t): # Note order of arguments. + return (x+y+z)*t + + a, b = 1.0, 2.0 + assert_quad(tplquad(simpfunc, a, b, + lambda x: x, lambda x: 2*x, + lambda x, y: x - y, lambda x, y: x + y, + (2.,)), + 2*8/3.0 * (b**4.0 - a**4.0)) + + @pytest.mark.xslow + @pytest.mark.parametrize( + "x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected", + [ + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 0] for all n. + (-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (one at a time). + (-np.inf, -1, -np.inf, 0, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (-np.inf, 0, -np.inf, -1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (-np.inf, 0, -np.inf, 0, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (two at a time). + (-np.inf, -1, -np.inf, -1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (-np.inf, -1, -np.inf, 0, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (-np.inf, 0, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for all n.
+ (-np.inf, -1, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1]. + (-np.inf, -1, -np.inf, -1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1]. + (-np.inf, 1, -np.inf, 1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (one at a time). + (-np.inf, 1, -np.inf, 0, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 0, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (two at a time). + (-np.inf, 1, -np.inf, 1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-np.inf, 1, -np.inf, 0, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-np.inf, 0, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for all n. + (-np.inf, 1, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [0, inf] for all n. + (0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for each n (one at a time). + (1, np.inf, 0, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (0, np.inf, 1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (0, np.inf, 0, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for each n (two at a time). + (1, np.inf, 1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (1, np.inf, 0, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (0, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for all n. 
+ (1, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (one at a time). + (-1, np.inf, 0, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (0, np.inf, -1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (0, np.inf, 0, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (two at a time). + (-1, np.inf, -1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-1, np.inf, 0, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (0, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for all n. + (-1, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [1, inf] and Dy = Dz = [-1, inf]. + (1, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [1, inf] and Dz = [-1, inf]. + (1, np.inf, 1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [1, inf] and Dy = [-1, inf]. + (1, np.inf, -1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-1, inf] and Dy = Dz = [1, inf]. + (-1, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-1, inf] and Dz = [1, inf]. + (-1, np.inf, -1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-1, inf] and Dy = [1, inf]. + (-1, np.inf, 1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, inf] for all n. + (-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, + np.pi ** (3 / 2)), + ], + ) + def test_triple_integral_improper( + self, + x_lower, + x_upper, + y_lower, + y_upper, + z_lower, + z_upper, + expected + ): + # The Gaussian Integral. + def f(x, y, z): + return np.exp(-x ** 2 - y ** 2 - z ** 2) + + assert_quad( + tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper), + expected, + error_tolerance=6e-8 + ) + + def test_complex(self): + def tfunc(x): + return np.exp(1j*x) + + assert np.allclose( + quad(tfunc, 0, np.pi/2, complex_func=True)[0], + 1+1j) + + # We consider a divergent case in order to force quadpack + # to return an error message. The output is compared + # against what is returned by explicit integration + # of the parts. 
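+ # (With complex_func=True, quad integrates the real and imaginary parts separately; full_output info then comes back as a dict with 'real' and 'imag' entries, unpacked below.)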
+ kwargs = {'a': 0, 'b': np.inf, 'full_output': True, + 'weight': 'cos', 'wvar': 1} + res_c = quad(tfunc, complex_func=True, **kwargs) + res_r = quad(lambda x: np.real(np.exp(1j*x)), + complex_func=False, + **kwargs) + res_i = quad(lambda x: np.imag(np.exp(1j*x)), + complex_func=False, + **kwargs) + + np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0]) + np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1]) + + assert len(res_c[2]['real']) == len(res_r[2:]) == 3 + assert res_c[2]['real'][2] == res_r[4] + assert res_c[2]['real'][1] == res_r[3] + assert res_c[2]['real'][0]['lst'] == res_r[2]['lst'] + + assert len(res_c[2]['imag']) == len(res_i[2:]) == 1 + assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst'] + + +class TestNQuad: + @pytest.mark.fail_slow(2) + def test_fixed_limits(self): + def func1(x0, x1, x2, x3): + val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) + + (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0)) + return val + + def opts_basic(*args): + return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]} + + res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]], + opts=[opts_basic, {}, {}, {}], full_output=True) + assert_quad(res[:-1], 1.5267454070738635) + assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5) + + @pytest.mark.fail_slow(2) + def test_variable_limits(self): + scale = .1 + + def func2(x0, x1, x2, x3, t0, t1): + val = (x0*x1*x3**2 + np.sin(x2) + 1 + + (1 if x0 + t1*x1 - t0 > 0 else 0)) + return val + + def lim0(x1, x2, x3, t0, t1): + return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1, + scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1] + + def lim1(x2, x3, t0, t1): + return [scale * (t0*x2 + t1*x3) - 1, + scale * (t0*x2 + t1*x3) + 1] + + def lim2(x3, t0, t1): + return [scale * (x3 + t0**2*t1**3) - 1, + scale * (x3 + t0**2*t1**3) + 1] + + def lim3(t0, t1): + return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1] + + def opts0(x1, x2, x3, t0, t1): + return {'points': [t0 - t1*x1]} + + def opts1(x2, x3, t0, t1): + return {} + + def opts2(x3, t0, t1): + return {} + + def opts3(t0, t1): + return {} + + res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0), + opts=[opts0, opts1, opts2, opts3]) + assert_quad(res, 25.066666666666663) + + def test_square_separate_ranges_and_opts(self): + def f(y, x): + return 1.0 + + assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0) + + def test_square_aliased_ranges_and_opts(self): + def f(y, x): + return 1.0 + + r = [-1, 1] + opt = {} + assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0) + + def test_square_separate_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range0(*args): + return (-1, 1) + + def fn_range1(*args): + return (-1, 1) + + def fn_opt0(*args): + return {} + + def fn_opt1(*args): + return {} + + ranges = [fn_range0, fn_range1] + opts = [fn_opt0, fn_opt1] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_square_aliased_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range(*args): + return (-1, 1) + + def fn_opt(*args): + return {} + + ranges = [fn_range, fn_range] + opts = [fn_opt, fn_opt] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_matching_quad(self): + def func(x): + return x**2 + 1 + + res, reserr = quad(func, 0, 4) + res2, reserr2 = nquad(func, ranges=[[0, 4]]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_dblquad(self): + def func2d(x0, x1): + return x0**2 + x1**3 - x0 * x1 + 1 + + res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3) + res2, reserr2 = nquad(func2d, [[-3, 3], 
(-2, 2)]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_tplquad(self): + def func3d(x0, x1, x2, c0, c1): + return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2) + + res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2, + lambda x, y: -np.pi, lambda x, y: np.pi, + args=(2, 3)) + res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3)) + assert_almost_equal(res, res2) + + def test_dict_as_opts(self): + try: + nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001}) + except TypeError: + assert False + diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..9006fb4141529802731e33d50bb712686ee098ed --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py @@ -0,0 +1,721 @@ +# mypy: disable-error-code="attr-defined" +import pytest +import numpy as np +from numpy import cos, sin, pi +from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose, + assert_, suppress_warnings) +from hypothesis import given +import hypothesis.strategies as st +import hypothesis.extra.numpy as hyp_num + +from scipy.integrate import (quadrature, romberg, romb, newton_cotes, + cumulative_trapezoid, trapezoid, + quad, simpson, fixed_quad, AccuracyWarning, + qmc_quad, cumulative_simpson) +from scipy.integrate._quadrature import _cumulative_simpson_unequal_intervals +from scipy import stats, special + + +class TestFixedQuad: + def test_scalar(self): + n = 4 + expected = 1/(2*n) + got, _ = fixed_quad(lambda x: x**(2*n - 1), 0, 1, n=n) + # quadrature exact for this input + assert_allclose(got, expected, rtol=1e-12) + + def test_vector(self): + n = 4 + p = np.arange(1, 2*n) + expected = 1/(p + 1) + got, _ = fixed_quad(lambda x: x**p[:, None], 0, 1, n=n) + assert_allclose(got, expected, rtol=1e-12) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestQuadrature: + def quad(self, x, a, b, args): + raise NotImplementedError + + def test_quadrature(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, (2, 1.8)) + table_val = 0.30614353532540296487 + assert_almost_equal(val, table_val, decimal=7) + + def test_quadrature_rtol(self): + def myfunc(x, n, z): # Bessel function integrand + return 1e90 * cos(n*x-z*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10) + table_val = 1e90 * 0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_quadrature_miniter(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + table_val = 0.30614353532540296487 + for miniter in [5, 52]: + val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter) + assert_almost_equal(val, table_val, decimal=7) + assert_(err < 1.0) + + def test_quadrature_single_args(self): + def myfunc(x, n): + return 1e90 * cos(n*x-1.8*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10) + table_val = 1e90 * 0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_romberg(self): + # Typical function with two extra arguments: + def 
myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + val = romberg(myfunc, 0, pi, args=(2, 1.8)) + table_val = 0.30614353532540296487 + assert_almost_equal(val, table_val, decimal=7) + + def test_romberg_rtol(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return 1e19*cos(n*x-z*sin(x))/pi + val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10) + table_val = 1e19*0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_romb(self): + assert_equal(romb(np.arange(17)), 128) + + def test_romb_gh_3731(self): + # Check that romb makes maximal use of data points + x = np.arange(2**4+1) + y = np.cos(0.2*x) + val = romb(y) + val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max()) + assert_allclose(val, val2, rtol=1e-8, atol=0) + + # should be equal to romb with 2**k+1 samples + with suppress_warnings() as sup: + sup.filter(AccuracyWarning, "divmax .4. exceeded") + val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4) + assert_allclose(val, val3, rtol=1e-12, atol=0) + + def test_non_dtype(self): + # Check that we work fine with functions returning float + import math + valmath = romberg(math.sin, 0, 1) + expected_val = 0.45969769413185085 + assert_almost_equal(valmath, expected_val, decimal=7) + + def test_newton_cotes(self): + """Test the first few degrees, for evenly spaced points.""" + n = 1 + wts, errcoff = newton_cotes(n, 1) + assert_equal(wts, n*np.array([0.5, 0.5])) + assert_almost_equal(errcoff, -n**3/12.0) + + n = 2 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0) + assert_almost_equal(errcoff, -n**5/2880.0) + + n = 3 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0) + assert_almost_equal(errcoff, -n**5/6480.0) + + n = 4 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0) + assert_almost_equal(errcoff, -n**7/1935360.0) + + def test_newton_cotes2(self): + """Test newton_cotes with points that are not evenly spaced.""" + + x = np.array([0.0, 1.5, 2.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 8.0/3 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + x = np.array([0.0, 1.4, 2.1, 3.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 9.0 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + def test_simpson(self): + y = np.arange(17) + assert_equal(simpson(y), 128) + assert_equal(simpson(y, dx=0.5), 64) + assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32) + + # integral should be exactly 21 + x = np.linspace(1, 4, 4) + def f(x): + return x**2 + + assert_allclose(simpson(f(x), x=x), 21.0) + + # integral should be exactly 114 + x = np.linspace(1, 7, 4) + assert_allclose(simpson(f(x), dx=2.0), 114) + + # test multi-axis behaviour + a = np.arange(16).reshape(4, 4) + x = np.arange(64.).reshape(4, 4, 4) + y = f(x) + for i in range(3): + r = simpson(y, x=x, axis=i) + it = np.nditer(a, flags=['multi_index']) + for _ in it: + idx = list(it.multi_index) + idx.insert(i, slice(None)) + integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3 + assert_allclose(r[it.multi_index], integral) + + # test when integration axis only has two points + x = np.arange(16).reshape(8, 2) + y = f(x) + r = simpson(y, x=x, axis=-1) + + integral = 0.5 * (y[:, 1] + y[:, 0]) * (x[:, 1] - x[:, 0]) + assert_allclose(r, 
integral) + + # odd points, test multi-axis behaviour + a = np.arange(25).reshape(5, 5) + x = np.arange(125).reshape(5, 5, 5) + y = f(x) + for i in range(3): + r = simpson(y, x=x, axis=i) + it = np.nditer(a, flags=['multi_index']) + for _ in it: + idx = list(it.multi_index) + idx.insert(i, slice(None)) + integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3 + assert_allclose(r[it.multi_index], integral) + + # Tests for checking base case + x = np.array([3]) + y = np.power(x, 2) + assert_allclose(simpson(y, x=x, axis=0), 0.0) + assert_allclose(simpson(y, x=x, axis=-1), 0.0) + + x = np.array([3, 3, 3, 3]) + y = np.power(x, 2) + assert_allclose(simpson(y, x=x, axis=0), 0.0) + assert_allclose(simpson(y, x=x, axis=-1), 0.0) + + x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]]) + y = np.power(x, 2) + zero_axis = [0.0, 0.0, 0.0, 0.0] + default_axis = [170 + 1/3] * 3 # 8**3 / 3 - 1/3 + assert_allclose(simpson(y, x=x, axis=0), zero_axis) + # the following should be exact + assert_allclose(simpson(y, x=x, axis=-1), default_axis) + + x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]]) + y = np.power(x, 2) + zero_axis = [0.0, 136.0, 1088.0, 8704.0] + default_axis = [170 + 1/3, 170 + 1/3, 32**3 / 3 - 1/3] + assert_allclose(simpson(y, x=x, axis=0), zero_axis) + assert_allclose(simpson(y, x=x, axis=-1), default_axis) + + + @pytest.mark.parametrize('droplast', [False, True]) + def test_simpson_2d_integer_no_x(self, droplast): + # The inputs are 2d integer arrays. The results should be + # identical to the results when the inputs are floating point. + y = np.array([[2, 2, 4, 4, 8, 8, -4, 5], + [4, 4, 2, -4, 10, 22, -2, 10]]) + if droplast: + y = y[:, :-1] + result = simpson(y, axis=-1) + expected = simpson(np.array(y, dtype=np.float64), axis=-1) + assert_equal(result, expected) + + +@pytest.mark.parametrize('func', [romberg, quadrature]) +def test_deprecate_integrator(func): + message = f"`scipy.integrate.{func.__name__}` is deprecated..." + with pytest.deprecated_call(match=message): + func(np.exp, 0, 1) + + +class TestCumulative_trapezoid: + def test_1d(self): + x = np.linspace(-2, 2, num=5) + y = x + y_int = cumulative_trapezoid(y, x, initial=0) + y_expected = [0., -1.5, -2., -1.5, 0.] 
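+ # (For y = x, the running integral from -2 is x**2/2 - 2, which is exactly y_expected.)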
+ assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, x, initial=None) + assert_allclose(y_int, y_expected[1:]) + + def test_y_nd_x_nd(self): + x = np.arange(3 * 2 * 4).reshape(3, 2, 4) + y = x + y_int = cumulative_trapezoid(y, x, initial=0) + y_expected = np.array([[[0., 0.5, 2., 4.5], + [0., 4.5, 10., 16.5]], + [[0., 8.5, 18., 28.5], + [0., 12.5, 26., 40.5]], + [[0., 16.5, 34., 52.5], + [0., 20.5, 42., 64.5]]]) + + assert_allclose(y_int, y_expected) + + # Try with all axes + shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)] + for axis, shape in zip([0, 1, 2], shapes): + y_int = cumulative_trapezoid(y, x, initial=0, axis=axis) + assert_equal(y_int.shape, (3, 2, 4)) + y_int = cumulative_trapezoid(y, x, initial=None, axis=axis) + assert_equal(y_int.shape, shape) + + def test_y_nd_x_1d(self): + y = np.arange(3 * 2 * 4).reshape(3, 2, 4) + x = np.arange(4)**2 + # Try with all axes + ys_expected = ( + np.array([[[4., 5., 6., 7.], + [8., 9., 10., 11.]], + [[40., 44., 48., 52.], + [56., 60., 64., 68.]]]), + np.array([[[2., 3., 4., 5.]], + [[10., 11., 12., 13.]], + [[18., 19., 20., 21.]]]), + np.array([[[0.5, 5., 17.5], + [4.5, 21., 53.5]], + [[8.5, 37., 89.5], + [12.5, 53., 125.5]], + [[16.5, 69., 161.5], + [20.5, 85., 197.5]]])) + + for axis, y_expected in zip([0, 1, 2], ys_expected): + y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis, + initial=None) + assert_allclose(y_int, y_expected) + + def test_x_none(self): + y = np.linspace(-2, 2, num=5) + + y_int = cumulative_trapezoid(y) + y_expected = [-1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, initial=0) + y_expected = [0, -1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, dx=3) + y_expected = [-4.5, -6., -4.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, dx=3, initial=0) + y_expected = [0, -4.5, -6., -4.5, 0.] 
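+ # (Without x, samples are dx apart, so dx=3 simply triples the dx=1 results.)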
+ assert_allclose(y_int, y_expected) + + @pytest.mark.parametrize( + "initial", [1, 0.5] + ) + def test_initial_warning(self, initial): + """If initial is not None or 0, a DeprecationWarning is emitted.""" + y = np.linspace(0, 10, num=10) + with pytest.deprecated_call(match="`initial`"): + res = cumulative_trapezoid(y, initial=initial) + assert_allclose(res, [initial, *np.cumsum(y[1:] + y[:-1])/2]) + + def test_zero_len_y(self): + with pytest.raises(ValueError, match="At least one point is required"): + cumulative_trapezoid(y=[]) + + +class TestTrapezoid: + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_allclose(r, 1) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + assert_allclose(r, qx) + r = trapezoid(q, x=y[None,:, None], axis=1) + assert_allclose(r, qy) + r = trapezoid(q, x=z[None, None,:], axis=2) + assert_allclose(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + assert_allclose(r, qx) + r = trapezoid(q, x=y, axis=1) + assert_allclose(r, qy) + r = trapezoid(q, x=z, axis=2) + assert_allclose(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_allclose(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_allclose(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_allclose(trapezoid(y, xm), r) + + +class TestQMCQuad: + def test_input_validation(self): + message = "`func` must be callable." + with pytest.raises(TypeError, match=message): + qmc_quad("a duck", [0, 0], [1, 1]) + + message = "`func` must evaluate the integrand at points..." + with pytest.raises(ValueError, match=message): + qmc_quad(lambda: 1, [0, 0], [1, 1]) + + def func(x): + assert x.ndim == 1 + return np.sum(x) + message = "Exception encountered when attempting vectorized call..." + with pytest.warns(UserWarning, match=message): + qmc_quad(func, [0, 0], [1, 1]) + + message = "`n_points` must be an integer." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5) + + message = "`n_estimates` must be an integer." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5) + + message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck") + + message = "`qrng` must be initialized with dimensionality equal to " + with pytest.raises(ValueError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1)) + + message = r"`log` must be boolean \(`True` or `False`\)."
+ with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10) + + def basic_test(self, n_points=2**8, n_estimates=8, signs=np.ones(2)): + + ndim = 2 + mean = np.zeros(ndim) + cov = np.eye(ndim) + + def func(x): + return stats.multivariate_normal.pdf(x.T, mean, cov) + + rng = np.random.default_rng(2879434385674690281) + qrng = stats.qmc.Sobol(ndim, seed=rng) + a = np.zeros(ndim) + b = np.ones(ndim) * signs + res = qmc_quad(func, a, b, n_points=n_points, + n_estimates=n_estimates, qrng=qrng) + ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a) + atol = special.stdtrit(n_estimates-1, 0.995) * res.standard_error # 99% CI + assert_allclose(res.integral, ref, atol=atol) + assert np.prod(signs)*res.integral > 0 + + rng = np.random.default_rng(2879434385674690281) + qrng = stats.qmc.Sobol(ndim, seed=rng) + logres = qmc_quad(lambda *args: np.log(func(*args)), a, b, + n_points=n_points, n_estimates=n_estimates, + log=True, qrng=qrng) + assert_allclose(np.exp(logres.integral), res.integral, rtol=1e-14) + assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0) + assert_allclose(np.exp(logres.standard_error), + res.standard_error, rtol=1e-14, atol=1e-16) + + @pytest.mark.parametrize("n_points", [2**8, 2**12]) + @pytest.mark.parametrize("n_estimates", [8, 16]) + def test_basic(self, n_points, n_estimates): + self.basic_test(n_points, n_estimates) + + @pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]]) + def test_sign(self, signs): + self.basic_test(signs=signs) + + @pytest.mark.parametrize("log", [False, True]) + def test_zero(self, log): + message = "A lower limit was equal to an upper limit, so" + with pytest.warns(UserWarning, match=message): + res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log) + assert res.integral == (-np.inf if log else 0) + assert res.standard_error == 0 + + def test_flexible_input(self): + # check that qrng is not required + # also checks that for 1d problems, a and b can be scalars + def func(x): + return stats.norm.pdf(x, scale=2) + + res = qmc_quad(func, 0, 1) + ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2) + assert_allclose(res.integral, ref, 1e-2) + + +def cumulative_simpson_nd_reference(y, *, x=None, dx=None, initial=None, axis=-1): + # Use cumulative_trapezoid if length of y < 3 + if y.shape[axis] < 3: + if initial is None: + return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=None) + else: + return initial + cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=0) + + # Ensure that working axis is last axis + y = np.moveaxis(y, axis, -1) + x = np.moveaxis(x, axis, -1) if np.ndim(x) > 1 else x + dx = np.moveaxis(dx, axis, -1) if np.ndim(dx) > 1 else dx + initial = np.moveaxis(initial, axis, -1) if np.ndim(initial) > 1 else initial + + # If `x` is not present, create it from `dx` + n = y.shape[-1] + x = dx * np.arange(n) if dx is not None else x + # Similarly, if `initial` is not present, set it to 0 + initial_was_none = initial is None + initial = 0 if initial_was_none else initial + + # `np.apply_along_axis` accepts only one array, so concatenate arguments + x = np.broadcast_to(x, y.shape) + initial = np.broadcast_to(initial, y.shape[:-1] + (1,)) + z = np.concatenate((y, x, initial), axis=-1) + + # Use `np.apply_along_axis` to compute result + def f(z): + return cumulative_simpson(z[:n], x=z[n:2*n], initial=z[2*n:]) + res = np.apply_along_axis(f, -1, z) + + # Remove `initial` and undo axis move as needed + res = res[..., 1:] if initial_was_none else res + res 
= np.moveaxis(res, -1, axis) + return res + + +class TestCumulativeSimpson: + x0 = np.arange(4) + y0 = x0**2 + + @pytest.mark.parametrize('use_dx', (False, True)) + @pytest.mark.parametrize('use_initial', (False, True)) + def test_1d(self, use_dx, use_initial): + # Test for exact agreement with polynomial of highest + # possible order (3 if `dx` is constant, 2 otherwise). + rng = np.random.default_rng(82456839535679456794) + n = 10 + + # Generate random polynomials and ground truth + # integral of appropriate order + order = 3 if use_dx else 2 + dx = rng.random() + x = (np.sort(rng.random(n)) if order == 2 + else np.arange(n)*dx + rng.random()) + i = np.arange(order + 1)[:, np.newaxis] + c = rng.random(order + 1)[:, np.newaxis] + y = np.sum(c*x**i, axis=0) + Y = np.sum(c*x**(i + 1)/(i + 1), axis=0) + ref = Y if use_initial else (Y-Y[0])[1:] + + # Integrate with `cumulative_simpson` + initial = Y[0] if use_initial else None + kwarg = {'dx': dx} if use_dx else {'x': x} + res = cumulative_simpson(y, **kwarg, initial=initial) + + # Compare result against reference + if not use_dx: + assert_allclose(res, ref, rtol=2e-15) + else: + i0 = 0 if use_initial else 1 + # all terms are "close" + assert_allclose(res, ref, rtol=0.0025) + # only even-interval terms are "exact" + assert_allclose(res[i0::2], ref[i0::2], rtol=2e-15) + + @pytest.mark.parametrize('axis', np.arange(-3, 3)) + @pytest.mark.parametrize('x_ndim', (1, 3)) + @pytest.mark.parametrize('x_len', (1, 2, 7)) + @pytest.mark.parametrize('i_ndim', (None, 0, 3,)) + @pytest.mark.parametrize('dx', (None, True)) + def test_nd(self, axis, x_ndim, x_len, i_ndim, dx): + # Test behavior of `cumulative_simpson` with N-D `y` + rng = np.random.default_rng(82456839535679456794) + + # determine shapes + shape = [5, 6, x_len] + shape[axis], shape[-1] = shape[-1], shape[axis] + shape_len_1 = shape.copy() + shape_len_1[axis] = 1 + i_shape = shape_len_1 if i_ndim == 3 else () + + # initialize arguments + y = rng.random(size=shape) + x, dx = None, None + if dx: + dx = rng.random(size=shape_len_1) if x_ndim > 1 else rng.random() + else: + x = (np.sort(rng.random(size=shape), axis=axis) if x_ndim > 1 + else np.sort(rng.random(size=shape[axis]))) + initial = None if i_ndim is None else rng.random(size=i_shape) + + # compare results + res = cumulative_simpson(y, x=x, dx=dx, initial=initial, axis=axis) + ref = cumulative_simpson_nd_reference(y, x=x, dx=dx, initial=initial, axis=axis) + np.testing.assert_allclose(res, ref, rtol=1e-15) + + @pytest.mark.parametrize(('message', 'kwarg_update'), [ + ("x must be strictly increasing", dict(x=[2, 2, 3, 4])), + ("x must be strictly increasing", dict(x=[x0, [2, 2, 4, 8]], y=[y0, y0])), + ("x must be strictly increasing", dict(x=[x0, x0, x0], y=[y0, y0, y0], axis=0)), + ("At least one point is required", dict(x=[], y=[])), + ("`axis=4` is not valid for `y` with `y.ndim=1`", dict(axis=4)), + ("shape of `x` must be the same as `y` or 1-D", dict(x=np.arange(5))), + ("`initial` must either be a scalar or...", dict(initial=np.arange(5))), + ("`dx` must either be a scalar or...", dict(x=None, dx=np.arange(5))), + ]) + def test_simpson_exceptions(self, message, kwarg_update): + kwargs0 = dict(y=self.y0, x=self.x0, dx=None, initial=None, axis=-1) + with pytest.raises(ValueError, match=message): + cumulative_simpson(**dict(kwargs0, **kwarg_update)) + + def test_special_cases(self): + # Test special cases not checked elsewhere + rng = np.random.default_rng(82456839535679456794) + y = rng.random(size=10) + res = cumulative_simpson(y, 
dx=0) + assert_equal(res, 0) + + # Should add tests of: + # - all elements of `x` identical + # These should work as they do for `simpson` + + def _get_theoretical_diff_between_simps_and_cum_simps(self, y, x): + """`cumulative_simpson` and `simpson` can be tested against other to verify + they give consistent results. `simpson` will iteratively be called with + successively higher upper limits of integration. This function calculates + the theoretical correction required to `simpson` at even intervals to match + with `cumulative_simpson`. + """ + d = np.diff(x, axis=-1) + sub_integrals_h1 = _cumulative_simpson_unequal_intervals(y, d) + sub_integrals_h2 = _cumulative_simpson_unequal_intervals( + y[..., ::-1], d[..., ::-1] + )[..., ::-1] + + # Concatenate to build difference array + zeros_shape = (*y.shape[:-1], 1) + theoretical_difference = np.concatenate( + [ + np.zeros(zeros_shape), + (sub_integrals_h1[..., 1:] - sub_integrals_h2[..., :-1]), + np.zeros(zeros_shape), + ], + axis=-1, + ) + # Differences only expected at even intervals. Odd intervals will + # match exactly so there is no correction + theoretical_difference[..., 1::2] = 0.0 + # Note: the first interval will not match from this correction as + # `simpson` uses the trapezoidal rule + return theoretical_difference + + @pytest.mark.slow + @given( + y=hyp_num.arrays( + np.float64, + hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10), + elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7) + ) + ) + def test_cumulative_simpson_against_simpson_with_default_dx( + self, y + ): + """Theoretically, the output of `cumulative_simpson` will be identical + to `simpson` at all even indices and in the last index. The first index + will not match as `simpson` uses the trapezoidal rule when there are only two + data points. Odd indices after the first index are shown to match with + a mathematically-derived correction.""" + def simpson_reference(y): + return np.stack( + [simpson(y[..., :i], dx=1.0) for i in range(2, y.shape[-1]+1)], axis=-1, + ) + + res = cumulative_simpson(y, dx=1.0) + ref = simpson_reference(y) + theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps( + y, x=np.arange(y.shape[-1]) + ) + np.testing.assert_allclose( + res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:] + ) + + @pytest.mark.slow + @given( + y=hyp_num.arrays( + np.float64, + hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10), + elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7) + ) + ) + def test_cumulative_simpson_against_simpson( + self, y + ): + """Theoretically, the output of `cumulative_simpson` will be identical + to `simpson` at all even indices and in the last index. The first index + will not match as `simpson` uses the trapezoidal rule when there are only two + data points. 
Odd indices after the first index are shown to match with + a mathematically-derived correction.""" + interval = 10/(y.shape[-1] - 1) + x = np.linspace(0, 10, num=y.shape[-1]) + x[1:] = x[1:] + 0.2*interval*np.random.uniform(-1, 1, len(x) - 1) + + def simpson_reference(y, x): + return np.stack( + [simpson(y[..., :i], x=x[..., :i]) for i in range(2, y.shape[-1]+1)], + axis=-1, + ) + + res = cumulative_simpson(y, x=x) + ref = simpson_reference(y, x) + theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps( + y, x + ) + np.testing.assert_allclose( + res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:] + ) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py new file mode 100644 index 0000000000000000000000000000000000000000..084385cf9b4a0ddbb145aa99e9b22c381216ca33 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py @@ -0,0 +1,947 @@ +# mypy: disable-error-code="attr-defined" +import os +import pytest + +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +import scipy._lib._elementwise_iterative_method as eim +from scipy import special, stats +from scipy.integrate import quad_vec +from scipy.integrate._tanhsinh import _tanhsinh, _pair_cache, _nsum +from scipy.stats._discrete_distns import _gen_harmonic_gt1 + +class TestTanhSinh: + + # Test problems from [1] Section 6 + def f1(self, t): + return t * np.log(1 + t) + + f1.ref = 0.25 + f1.b = 1 + + def f2(self, t): + return t ** 2 * np.arctan(t) + + f2.ref = (np.pi - 2 + 2 * np.log(2)) / 12 + f2.b = 1 + + def f3(self, t): + return np.exp(t) * np.cos(t) + + f3.ref = (np.exp(np.pi / 2) - 1) / 2 + f3.b = np.pi / 2 + + def f4(self, t): + a = np.sqrt(2 + t ** 2) + return np.arctan(a) / ((1 + t ** 2) * a) + + f4.ref = 5 * np.pi ** 2 / 96 + f4.b = 1 + + def f5(self, t): + return np.sqrt(t) * np.log(t) + + f5.ref = -4 / 9 + f5.b = 1 + + def f6(self, t): + return np.sqrt(1 - t ** 2) + + f6.ref = np.pi / 4 + f6.b = 1 + + def f7(self, t): + return np.sqrt(t) / np.sqrt(1 - t ** 2) + + f7.ref = 2 * np.sqrt(np.pi) * special.gamma(3 / 4) / special.gamma(1 / 4) + f7.b = 1 + + def f8(self, t): + return np.log(t) ** 2 + + f8.ref = 2 + f8.b = 1 + + def f9(self, t): + return np.log(np.cos(t)) + + f9.ref = -np.pi * np.log(2) / 2 + f9.b = np.pi / 2 + + def f10(self, t): + return np.sqrt(np.tan(t)) + + f10.ref = np.pi * np.sqrt(2) / 2 + f10.b = np.pi / 2 + + def f11(self, t): + return 1 / (1 + t ** 2) + + f11.ref = np.pi / 2 + f11.b = np.inf + + def f12(self, t): + return np.exp(-t) / np.sqrt(t) + + f12.ref = np.sqrt(np.pi) + f12.b = np.inf + + def f13(self, t): + return np.exp(-t ** 2 / 2) + + f13.ref = np.sqrt(np.pi / 2) + f13.b = np.inf + + def f14(self, t): + return np.exp(-t) * np.cos(t) + + f14.ref = 0.5 + f14.b = np.inf + + def f15(self, t): + return np.sin(t) / t + + f15.ref = np.pi / 2 + f15.b = np.inf + + def error(self, res, ref, log=False): + err = abs(res - ref) + + if not log: + return err + + with np.errstate(divide='ignore'): + return np.log10(err) + + def test_input_validation(self): + f = self.f1 + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(42, 0, f.b) + + message = '...must be True or False.' 
+ with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, log=2) + + message = '...must be real numbers.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 1+1j, f.b) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol='ekki') + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=pytest) + + message = '...must be non-negative and finite.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol=np.inf) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=np.inf, log=True) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol=np.inf, log=True) + + message = '...must be integers.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxlevel=object()) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxfun=1+1j) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, minlevel="migratory coconut") + + message = '...must be non-negative.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxlevel=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxfun=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, minlevel=-1) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, preserve_shape=2) + + message = '...must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, callback='elderberry') + + @pytest.mark.parametrize("limits, ref", [ + [(0, np.inf), 0.5], # b infinite + [(-np.inf, 0), 0.5], # a infinite + [(-np.inf, np.inf), 1], # a and b infinite + [(np.inf, -np.inf), -1], # flipped limits + [(1, -1), stats.norm.cdf(-1) - stats.norm.cdf(1)], # flipped limits + ]) + def test_integral_transforms(self, limits, ref): + # Check that the integral transforms are behaving for both normal and + # log integration + dist = stats.norm() + + res = _tanhsinh(dist.pdf, *limits) + assert_allclose(res.integral, ref) + + logres = _tanhsinh(dist.logpdf, *limits, log=True) + assert_allclose(np.exp(logres.integral), ref) + # Transformation should not make the result complex unnecessarily + assert (np.issubdtype(logres.integral.dtype, np.floating) if ref > 0 + else np.issubdtype(logres.integral.dtype, np.complexfloating)) + + assert_allclose(np.exp(logres.error), res.error, atol=1e-16) + + # 15 skipped intentionally; it's very difficult numerically + @pytest.mark.parametrize('f_number', range(1, 15)) + def test_basic(self, f_number): + f = getattr(self, f"f{f_number}") + rtol = 2e-8 + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert_allclose(res.integral, f.ref, rtol=rtol) + if f_number not in {14}: # mildly underestimates error here + true_error = abs(self.error(res.integral, f.ref)/res.integral) + assert true_error < res.error + + if f_number in {7, 10, 12}: # succeeds, but doesn't know it + return + + assert res.success + assert res.status == 0 + + @pytest.mark.parametrize('ref', (0.5, [0.4, 0.6])) + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, ref, case): + distname, params = case + if distname in {'dgamma', 'dweibull', 'laplace', 'kstwo'}: + # should split up interval at first-derivative discontinuity + pytest.skip('tanh-sinh is not great for non-smooth integrands') + if (distname in 
{'studentized_range', 'levy_stable'} + and not int(os.getenv('SCIPY_XSLOW', 0))): + pytest.skip('This case passes, but it is too slow.') + dist = getattr(stats, distname)(*params) + x = dist.interval(ref) + res = _tanhsinh(dist.pdf, *x) + assert_allclose(res.integral, ref) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.random(shape) + b = rng.random(shape) + p = rng.random(shape) + n = np.prod(shape) + + def f(x, p): + f.ncall += 1 + f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1] + return x**p + f.ncall = 0 + f.feval = 0 + + @np.vectorize + def _tanhsinh_single(a, b, p): + return _tanhsinh(lambda x: x**p, a, b) + + res = _tanhsinh(f, a, b, args=(p,)) + refs = _tanhsinh_single(a, b, p).ravel() + + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.maxlevel.dtype, np.integer) + assert_equal(np.max(res.nfev), f.feval) + # maxlevel = 2 -> 3 function calls (2 initialization, 1 work) + assert np.max(res.maxlevel) >= 2 + assert_equal(np.max(res.maxlevel), f.ncall) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + def f(xs, js): + f.nit += 1 + funcs = [lambda x: np.exp(-x**2), # converges + lambda x: np.exp(x), # reaches maxiter due to order=2 + lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN + res = [funcs[j](x) for x, j in zip(xs, js.ravel())] + return res + f.nit = 0 + + args = (np.arange(3, dtype=np.int64),) + res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, args=args) + ref_flags = np.array([0, -2, -3]) + assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self): + # Same test as above but using `preserve_shape` option to simplify. 
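+        # (with `preserve_shape=True`, `f` receives `x` with the same shape as
+        # the broadcasted limits, so the three integrands can be selected by
+        # indexing `x` directly instead of through separate `xs`/`js` arguments)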
+ def f(x): + return [np.exp(-x[0]**2), # converges + np.exp(x[1]), # reaches maxiter due to order=2 + np.full_like(x[2], np.nan)[()]] # stops due to NaN + + res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, preserve_shape=True) + ref_flags = np.array([0, -2, -3]) + assert_equal(res.status, ref_flags) + + def test_preserve_shape(self): + # Test `preserve_shape` option + def f(x): + return np.asarray([[x, np.sin(10 * x)], + [np.cos(30 * x), x * np.sin(100 * x)]]) + + ref = quad_vec(f, 0, 1) + res = _tanhsinh(f, 0, 1, preserve_shape=True) + assert_allclose(res.integral, ref[0]) + + def test_convergence(self): + # demonstrate that number of accurate digits doubles each iteration + f = self.f1 + last_logerr = 0 + for i in range(4): + res = _tanhsinh(f, 0, f.b, minlevel=0, maxlevel=i) + logerr = self.error(res.integral, f.ref, log=True) + assert (logerr < last_logerr * 2 or logerr < -15.5) + last_logerr = logerr + + def test_options_and_result_attributes(self): + # demonstrate that options are behaving as advertised and status + # messages are as intended + def f(x): + f.calls += 1 + f.feval += np.size(x) + return self.f2(x) + f.ref = self.f2.ref + f.b = self.f2.b + default_rtol = 1e-12 + default_atol = f.ref * default_rtol # effective default absolute tol + + # Test default options + f.feval, f.calls = 0, 0 + ref = _tanhsinh(f, 0, f.b) + assert self.error(ref.integral, f.ref) < ref.error < default_atol + assert ref.nfev == f.feval + ref.calls = f.calls # reference number of function calls + assert ref.success + assert ref.status == 0 + + # Test `maxlevel` equal to required max level + # We should get all the same results + f.feval, f.calls = 0, 0 + maxlevel = ref.maxlevel + res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel) + res.calls = f.calls + assert res == ref + + # Now reduce the maximum level. We won't meet tolerances. + f.feval, f.calls = 0, 0 + maxlevel -= 1 + assert maxlevel >= 2 # can't compare errors otherwise + res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel) + assert self.error(res.integral, f.ref) < res.error > default_atol + assert res.nfev == f.feval < ref.nfev + assert f.calls == ref.calls - 1 + assert not res.success + assert res.status == eim._ECONVERR + + # `maxfun` is currently not enforced + + # # Test `maxfun` equal to required number of function evaluations + # # We should get all the same results + # f.feval, f.calls = 0, 0 + # maxfun = ref.nfev + # res = _tanhsinh(f, 0, f.b, maxfun = maxfun) + # assert res == ref + # + # # Now reduce `maxfun`. We won't meet tolerances. 
+ # f.feval, f.calls = 0, 0 + # maxfun -= 1 + # res = _tanhsinh(f, 0, f.b, maxfun=maxfun) + # assert self.error(res.integral, f.ref) < res.error > default_atol + # assert res.nfev == f.feval < ref.nfev + # assert f.calls == ref.calls - 1 + # assert not res.success + # assert res.status == 2 + + # Take this result to be the new reference + ref = res + ref.calls = f.calls + + # Test `atol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + atol = np.nextafter(ref.error, np.inf) + res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + atol = np.nextafter(ref.error, -np.inf) + res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol) + assert self.error(res.integral, f.ref) < res.error < atol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status == 0 + + # Test `rtol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + rtol = np.nextafter(ref.error/ref.integral, np.inf) + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + rtol = np.nextafter(ref.error/ref.integral, -np.inf) + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert self.error(res.integral, f.ref)/f.ref < res.error/res.integral < rtol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status == 0 + + @pytest.mark.parametrize('rtol', [1e-4, 1e-14]) + def test_log(self, rtol): + # Test equivalence of log-integration and regular integration + dist = stats.norm() + + test_tols = dict(atol=1e-18, rtol=1e-15) + + # Positive integrand (real log-integrand) + res = _tanhsinh(dist.logpdf, -1, 2, log=True, rtol=np.log(rtol)) + ref = _tanhsinh(dist.pdf, -1, 2, rtol=rtol) + assert_allclose(np.exp(res.integral), ref.integral, **test_tols) + assert_allclose(np.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + # Real integrand (complex log-integrand) + def f(x): + return -dist.logpdf(x)*dist.pdf(x) + + def logf(x): + return np.log(dist.logpdf(x) + 0j) + dist.logpdf(x) + np.pi * 1j + + res = _tanhsinh(logf, -np.inf, np.inf, log=True) + ref = _tanhsinh(f, -np.inf, np.inf) + # In gh-19173, we saw `invalid` warnings on one CI platform. + # Silencing `all` because I can't reproduce locally and don't want + # to risk the need to run CI again. 
+ with np.errstate(all='ignore'): + assert_allclose(np.exp(res.integral), ref.integral, **test_tols) + assert_allclose(np.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + def test_complex(self): + # Test integration of complex integrand + # Finite limits + def f(x): + return np.exp(1j * x) + + res = _tanhsinh(f, 0, np.pi/4) + ref = np.sqrt(2)/2 + (1-np.sqrt(2)/2)*1j + assert_allclose(res.integral, ref) + + # Infinite limits + dist1 = stats.norm(scale=1) + dist2 = stats.norm(scale=2) + def f(x): + return dist1.pdf(x) + 1j*dist2.pdf(x) + + res = _tanhsinh(f, np.inf, -np.inf) + assert_allclose(res.integral, -(1+1j)) + + @pytest.mark.parametrize("maxlevel", range(4)) + def test_minlevel(self, maxlevel): + # Verify that minlevel does not change the values at which the + # integrand is evaluated or the integral/error estimates, only the + # number of function calls + def f(x): + f.calls += 1 + f.feval += np.size(x) + f.x = np.concatenate((f.x, x.ravel())) + return self.f2(x) + f.feval, f.calls, f.x = 0, 0, np.array([]) + + ref = _tanhsinh(f, 0, self.f2.b, minlevel=0, maxlevel=maxlevel) + ref_x = np.sort(f.x) + + for minlevel in range(0, maxlevel + 1): + f.feval, f.calls, f.x = 0, 0, np.array([]) + options = dict(minlevel=minlevel, maxlevel=maxlevel) + res = _tanhsinh(f, 0, self.f2.b, **options) + # Should be very close; all that has changed is the order of values + assert_allclose(res.integral, ref.integral, rtol=4e-16) + # Difference in absolute errors << magnitude of integral + assert_allclose(res.error, ref.error, atol=4e-16 * ref.integral) + assert res.nfev == f.feval == len(f.x) + assert f.calls == maxlevel - minlevel + 1 + 1 # 1 validation call + assert res.status == ref.status + assert_equal(ref_x, np.sort(f.x)) + + def test_improper_integrals(self): + # Test handling of infinite limits of integration (mixed with finite limits) + def f(x): + x[np.isinf(x)] = np.nan + return np.exp(-x**2) + a = [-np.inf, 0, -np.inf, np.inf, -20, -np.inf, -20] + b = [np.inf, np.inf, 0, -np.inf, 20, 20, np.inf] + ref = np.sqrt(np.pi) + res = _tanhsinh(f, a, b) + assert_allclose(res.integral, [ref, ref/2, ref/2, -ref, ref, ref, ref]) + + @pytest.mark.parametrize("limits", ((0, 3), ([-np.inf, 0], [3, 3]))) + @pytest.mark.parametrize("dtype", (np.float32, np.float64)) + def test_dtype(self, limits, dtype): + # Test that dtypes are preserved + a, b = np.asarray(limits, dtype=dtype)[()] + + def f(x): + assert x.dtype == dtype + return np.exp(x) + + rtol = 1e-12 if dtype == np.float64 else 1e-5 + res = _tanhsinh(f, a, b, rtol=rtol) + assert res.integral.dtype == dtype + assert res.error.dtype == dtype + assert np.all(res.success) + assert_allclose(res.integral, np.exp(b)-np.exp(a), rtol=rtol) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + a, b = -np.inf, np.inf + def f(x): + return np.exp(-x*x) + + minlevel, maxlevel = 0, 2 + maxiter = maxlevel - minlevel + 1 + kwargs = dict(minlevel=minlevel, maxlevel=maxlevel, rtol=1e-15) + res = _tanhsinh(f, a, b, **kwargs) + assert not res.success + assert res.maxlevel == maxlevel + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'integral') + assert res.status == 1 + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + + del kwargs['maxlevel'] + res2 = _tanhsinh(f, a, b, **kwargs, callback=callback) + # terminating with callback is identical to terminating due to maxiter 
+        # (except for `status`)
+        for key in res.keys():
+            if key == 'status':
+                assert callback.res[key] == 1
+                assert res[key] == -2
+                assert res2[key] == -4
+            else:
+                assert res2[key] == callback.res[key] == res[key]
+
+    def test_jumpstart(self):
+        # The intermediate results at each level i should be the same as the
+        # final results when jumpstarting at level i; i.e. minlevel=maxlevel=i
+        a, b = -np.inf, np.inf
+        def f(x):
+            return np.exp(-x*x)
+
+        def callback(res):
+            callback.integrals.append(res.integral)
+            callback.errors.append(res.error)
+        callback.integrals = []
+        callback.errors = []
+
+        maxlevel = 4
+        _tanhsinh(f, a, b, minlevel=0, maxlevel=maxlevel, callback=callback)
+
+        integrals = []
+        errors = []
+        for i in range(maxlevel + 1):
+            res = _tanhsinh(f, a, b, minlevel=i, maxlevel=i)
+            integrals.append(res.integral)
+            errors.append(res.error)
+
+        assert_allclose(callback.integrals[1:], integrals, rtol=1e-15)
+        assert_allclose(callback.errors[1:], errors, rtol=1e-15, atol=1e-16)
+
+    def test_special_cases(self):
+        # Test edge cases and other special cases
+
+        # Test that integers are not passed to `f`
+        # (otherwise this would overflow)
+        def f(x):
+            assert np.issubdtype(x.dtype, np.floating)
+            return x ** 99
+
+        res = _tanhsinh(f, 0, 1)
+        assert res.success
+        assert_allclose(res.integral, 1/100)
+
+        # Test levels 0 and 1; error is NaN
+        res = _tanhsinh(f, 0, 1, maxlevel=0)
+        assert res.integral > 0
+        assert_equal(res.error, np.nan)
+        res = _tanhsinh(f, 0, 1, maxlevel=1)
+        assert res.integral > 0
+        assert_equal(res.error, np.nan)
+
+        # Test equal left and right integration limits
+        res = _tanhsinh(f, 1, 1)
+        assert res.success
+        assert res.maxlevel == -1
+        assert_allclose(res.integral, 0)
+
+        # Test scalar `args` (not in tuple)
+        def f(x, c):
+            return x**c
+
+        res = _tanhsinh(f, 0, 1, args=99)
+        assert_allclose(res.integral, 1/100)
+
+        # Test NaNs
+        a = [np.nan, 0, 0, 0]
+        b = [1, np.nan, 1, 1]
+        c = [1, 1, np.nan, 1]
+        res = _tanhsinh(f, a, b, args=(c,))
+        assert_allclose(res.integral, [np.nan, np.nan, np.nan, 0.5])
+        assert_allclose(res.error[:3], np.nan)
+        assert_equal(res.status, [-3, -3, -3, 0])
+        assert_equal(res.success, [False, False, False, True])
+        assert_equal(res.nfev[:3], 1)
+
+        # Test complex integral followed by real integral
+        # Previously, h0 was of the result dtype. If the `dtype` were complex,
+        # this could lead to complex cached abscissae/weights. If these get
+        # cast to real dtype for a subsequent real integral, we would get a
+        # ComplexWarning. Check that this is avoided.
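+        # Reset the global cache of abscissae/weights so that the complex
+        # integral below is the first to populate it.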
+        _pair_cache.xjc = np.empty(0)
+        _pair_cache.wj = np.empty(0)
+        _pair_cache.indices = [0]
+        _pair_cache.h0 = None
+        res = _tanhsinh(lambda x: x*1j, 0, 1)
+        assert_allclose(res.integral, 0.5*1j)
+        res = _tanhsinh(lambda x: x, 0, 1)
+        assert_allclose(res.integral, 0.5)
+
+        # Test zero-size
+        shape = (0, 3)
+        res = _tanhsinh(lambda x: x, 0, np.zeros(shape))
+        attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel']
+        for attr in attrs:
+            assert_equal(res[attr].shape, shape)
+
+
+class TestNSum:
+    rng = np.random.default_rng(5895448232066142650)
+    p = rng.uniform(1, 10, size=10)
+
+    def f1(self, k):
+        # Integers are never passed to `f1`; if they were, we'd get
+        # integer to negative integer power error
+        return k**(-2)
+
+    f1.ref = np.pi**2/6
+    f1.a = 1
+    f1.b = np.inf
+    f1.args = tuple()
+
+    def f2(self, k, p):
+        return 1 / k**p
+
+    f2.ref = special.zeta(p, 1)
+    f2.a = 1
+    f2.b = np.inf
+    f2.args = (p,)
+
+    def f3(self, k, p):
+        return 1 / k**p
+
+    f3.a = 1
+    f3.b = rng.integers(5, 15, size=(3, 1))
+    f3.ref = _gen_harmonic_gt1(f3.b, p)
+    f3.args = (p,)
+
+    def test_input_validation(self):
+        f = self.f1
+
+        message = '`f` must be callable.'
+        with pytest.raises(ValueError, match=message):
+            _nsum(42, f.a, f.b)
+
+        message = '...must be True or False.'
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, log=2)
+
+        message = '...must be real numbers.'
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, 1+1j, f.b)
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, None)
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, step=object())
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, atol='ekki')
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, rtol=pytest)
+
+        with np.errstate(all='ignore'):
+            res = _nsum(f, [np.nan, -np.inf, np.inf], 1)
+            assert np.all((res.status == -1) & np.isnan(res.sum)
+                          & np.isnan(res.error) & ~res.success
+                          & (res.nfev == 1))
+            res = _nsum(f, 10, [np.nan, 1])
+            assert np.all((res.status == -1) & np.isnan(res.sum)
+                          & np.isnan(res.error) & ~res.success
+                          & (res.nfev == 1))
+            res = _nsum(f, 1, 10, step=[np.nan, -np.inf, np.inf, -1, 0])
+            assert np.all((res.status == -1) & np.isnan(res.sum)
+                          & np.isnan(res.error) & ~res.success
+                          & (res.nfev == 1))
+
+        message = '...must be non-negative and finite.'
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, rtol=-1)
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, atol=np.inf)
+
+        message = '...may not be positive infinity.'
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, rtol=np.inf, log=True)
+        with pytest.raises(ValueError, match=message):
+            _nsum(f, f.a, f.b, atol=np.inf, log=True)
+
+        message = '...must be a non-negative integer.'
+ with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, maxterms=3.5) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, maxterms=-2) + + @pytest.mark.parametrize('f_number', range(1, 4)) + def test_basic(self, f_number): + f = getattr(self, f"f{f_number}") + res = _nsum(f, f.a, f.b, args=f.args) + assert_allclose(res.sum, f.ref) + assert_equal(res.status, 0) + assert_equal(res.success, True) + + with np.errstate(divide='ignore'): + logres = _nsum(lambda *args: np.log(f(*args)), + f.a, f.b, log=True, args=f.args) + assert_allclose(np.exp(logres.sum), res.sum) + assert_allclose(np.exp(logres.error), res.error) + assert_equal(logres.status, 0) + assert_equal(logres.success, True) + + @pytest.mark.parametrize('maxterms', [0, 1, 10, 20, 100]) + def test_integral(self, maxterms): + # test precise behavior of integral approximation + f = self.f1 + + def logf(x): + return -2*np.log(x) + + def F(x): + return -1 / x + + a = np.asarray([1, 5])[:, np.newaxis] + b = np.asarray([20, 100, np.inf])[:, np.newaxis, np.newaxis] + step = np.asarray([0.5, 1, 2]).reshape((-1, 1, 1, 1)) + nsteps = np.floor((b - a)/step) + b_original = b + b = a + nsteps*step + + k = a + maxterms*step + # partial sum + direct = f(a + np.arange(maxterms)*step).sum(axis=-1, keepdims=True) + integral = (F(b) - F(k))/step # integral approximation of remainder + low = direct + integral + f(b) # theoretical lower bound + high = direct + integral + f(k) # theoretical upper bound + ref_sum = (low + high)/2 # _nsum uses average of the two + ref_err = (high - low)/2 # error (assuming perfect quadrature) + + # correct reference values where number of terms < maxterms + a, b, step = np.broadcast_arrays(a, b, step) + for i in np.ndindex(a.shape): + ai, bi, stepi = a[i], b[i], step[i] + if (bi - ai)/stepi + 1 <= maxterms: + direct = f(np.arange(ai, bi+stepi, stepi)).sum() + ref_sum[i] = direct + ref_err[i] = direct * np.finfo(direct).eps + + rtol = 1e-12 + res = _nsum(f, a, b_original, step=step, maxterms=maxterms, rtol=rtol) + assert_allclose(res.sum, ref_sum, rtol=10*rtol) + assert_allclose(res.error, ref_err, rtol=100*rtol) + assert_equal(res.status, 0) + assert_equal(res.success, True) + + i = ((b_original - a)/step + 1 <= maxterms) + assert_allclose(res.sum[i], ref_sum[i], rtol=1e-15) + assert_allclose(res.error[i], ref_err[i], rtol=1e-15) + + logres = _nsum(logf, a, b_original, step=step, log=True, + rtol=np.log(rtol), maxterms=maxterms) + assert_allclose(np.exp(logres.sum), res.sum) + assert_allclose(np.exp(logres.error), res.error) + assert_equal(logres.status, 0) + assert_equal(logres.success, True) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.integers(1, 10, size=shape) + # when the sum can be computed directly or `maxterms` is large enough + # to meet `atol`, there are slight differences (for good reason) + # between vectorized call and looping. 
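+        # (the infinite upper limit below avoids the direct-summation case)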
+ b = np.inf + p = rng.random(shape) + 1 + n = np.prod(shape) + + def f(x, p): + f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1] + return 1 / x ** p + + f.feval = 0 + + @np.vectorize + def _nsum_single(a, b, p, maxterms): + return _nsum(lambda x: 1 / x**p, a, b, maxterms=maxterms) + + res = _nsum(f, a, b, maxterms=1000, args=(p,)) + refs = _nsum_single(a, b, p, maxterms=1000).ravel() + + attrs = ['sum', 'error', 'success', 'status', 'nfev'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert_equal(np.max(res.nfev), f.feval) + + def test_status(self): + f = self.f2 + + p = [2, 2, 0.9, 1.1] + a = [0, 0, 1, 1] + b = [10, np.inf, np.inf, np.inf] + ref = special.zeta(p, 1) + + with np.errstate(divide='ignore'): # intentionally dividing by zero + res = _nsum(f, a, b, args=(p,)) + + assert_equal(res.success, [False, False, False, True]) + assert_equal(res.status, [-3, -3, -2, 0]) + assert_allclose(res.sum[res.success], ref[res.success]) + + def test_nfev(self): + def f(x): + f.nfev += np.size(x) + return 1 / x**2 + + f.nfev = 0 + res = _nsum(f, 1, 10) + assert_equal(res.nfev, f.nfev) + + f.nfev = 0 + res = _nsum(f, 1, np.inf, atol=1e-6) + assert_equal(res.nfev, f.nfev) + + def test_inclusive(self): + # There was an edge case off-by one bug when `_direct` was called with + # `inclusive=True`. Check that this is resolved. + res = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf, maxterms=500, atol=0.1) + ref = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf) + assert np.all(res.sum > (ref.sum - res.error)) + assert np.all(res.sum < (ref.sum + res.error)) + + def test_special_case(self): + # test equal lower/upper limit + f = self.f1 + a = b = 2 + res = _nsum(f, a, b) + assert_equal(res.sum, f(a)) + + # Test scalar `args` (not in tuple) + res = _nsum(self.f2, 1, np.inf, args=2) + assert_allclose(res.sum, self.f1.ref) # f1.ref is correct w/ args=2 + + # Test 0 size input + a = np.empty((3, 1, 1)) # arbitrary broadcastable shapes + b = np.empty((0, 1)) # could use Hypothesis + p = np.empty(4) # but it's overkill + shape = np.broadcast_shapes(a.shape, b.shape, p.shape) + res = _nsum(self.f2, a, b, args=(p,)) + assert res.sum.shape == shape + assert res.status.shape == shape + assert res.nfev.shape == shape + + # Test maxterms=0 + def f(x): + with np.errstate(divide='ignore'): + return 1 / x + + res = _nsum(f, 0, 10, maxterms=0) + assert np.isnan(res.sum) + assert np.isnan(res.error) + assert res.status == -2 + + res = _nsum(f, 0, 10, maxterms=1) + assert np.isnan(res.sum) + assert np.isnan(res.error) + assert res.status == -3 + + # Test NaNs + # should skip both direct and integral methods if there are NaNs + a = [np.nan, 1, 1, 1] + b = [np.inf, np.nan, np.inf, np.inf] + p = [2, 2, np.nan, 2] + res = _nsum(self.f2, a, b, args=(p,)) + assert_allclose(res.sum, [np.nan, np.nan, np.nan, self.f1.ref]) + assert_allclose(res.error[:3], np.nan) + assert_equal(res.status, [-1, -1, -3, 0]) + assert_equal(res.success, [False, False, False, True]) + # Ideally res.nfev[2] would be 1, but `tanhsinh` has some function evals + assert_equal(res.nfev[:2], 1) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_dtype(self, dtype): + def f(k): + assert k.dtype == dtype + return 1 / k ** 
np.asarray(2, dtype=dtype)[()] + + a = np.asarray(1, dtype=dtype) + b = np.asarray([10, np.inf], dtype=dtype) + res = _nsum(f, a, b) + assert res.sum.dtype == dtype + assert res.error.dtype == dtype + + rtol = 1e-12 if dtype == np.float64 else 1e-6 + ref = _gen_harmonic_gt1(b, 2) + assert_allclose(res.sum, ref, rtol=rtol) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/vode.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/vode.py new file mode 100644 index 0000000000000000000000000000000000000000..f92927901084ce33cdeb006057d85dd501b13aae --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/integrate/vode.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="vode", + private_modules=["_vode"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a44a8c133b674aea416efeb4da469241b50a547f --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__init__.py @@ -0,0 +1,131 @@ +""" +================================================= +Orthogonal distance regression (:mod:`scipy.odr`) +================================================= + +.. currentmodule:: scipy.odr + +Package Content +=============== + +.. autosummary:: + :toctree: generated/ + + Data -- The data to fit. + RealData -- Data with weights as actual std. dev.s and/or covariances. + Model -- Stores information about the function to be fit. + ODR -- Gathers all info & manages the main fitting routine. + Output -- Result from the fit. + odr -- Low-level function for ODR. + + OdrWarning -- Warning about potential problems when running ODR. + OdrError -- Error exception. + OdrStop -- Stop exception. + + polynomial -- Factory function for a general polynomial model. + exponential -- Exponential model + multilinear -- Arbitrary-dimensional linear model + unilinear -- Univariate linear model + quadratic -- Quadratic model + +Usage information +================= + +Introduction +------------ + +Why Orthogonal Distance Regression (ODR)? Sometimes one has +measurement errors in the explanatory (a.k.a., "independent") +variable(s), not just the response (a.k.a., "dependent") variable(s). +Ordinary Least Squares (OLS) fitting procedures treat the data for +explanatory variables as fixed, i.e., not subject to error of any kind. +Furthermore, OLS procedures require that the response variables be an +explicit function of the explanatory variables; sometimes making the +equation explicit is impractical and/or introduces errors. ODR can +handle both of these cases with ease, and can even reduce to the OLS +case if that is sufficient for the problem. + +ODRPACK is a FORTRAN-77 library for performing ODR with possibly +non-linear fitting functions. It uses a modified trust-region +Levenberg-Marquardt-type algorithm [1]_ to estimate the function +parameters. The fitting functions are provided by Python functions +operating on NumPy arrays. 
The required derivatives may be provided +by Python functions as well, or may be estimated numerically. ODRPACK +can do explicit or implicit ODR fits, or it can do OLS. Input and +output variables may be multidimensional. Weights can be provided to +account for different variances of the observations, and even +covariances between dimensions of the variables. + +The `scipy.odr` package offers an object-oriented interface to +ODRPACK, in addition to the low-level `odr` function. + +Additional background information about ODRPACK can be found in the +`ODRPACK User's Guide +`_, reading +which is recommended. + +Basic usage +----------- + +1. Define the function you want to fit against.:: + + def f(B, x): + '''Linear function y = m*x + b''' + # B is a vector of the parameters. + # x is an array of the current x values. + # x is in the same format as the x passed to Data or RealData. + # + # Return an array in the same format as y passed to Data or RealData. + return B[0]*x + B[1] + +2. Create a Model.:: + + linear = Model(f) + +3. Create a Data or RealData instance.:: + + mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) + + or, when the actual covariances are known:: + + mydata = RealData(x, y, sx=sx, sy=sy) + +4. Instantiate ODR with your data, model and initial parameter estimate.:: + + myodr = ODR(mydata, linear, beta0=[1., 2.]) + +5. Run the fit.:: + + myoutput = myodr.run() + +6. Examine output.:: + + myoutput.pprint() + + +References +---------- +.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," + in "Statistical analysis of measurement error models and + applications: proceedings of the AMS-IMS-SIAM joint summer research + conference held June 10-16, 1989," Contemporary Mathematics, + vol. 112, pg. 186, 1990. + +""" +# version: 0.7 +# author: Robert Kern +# date: 2006-09-21 + +from ._odrpack import * +from ._models import * +from . import _add_newdocs + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import models, odrpack + +__all__ = [s for s in dir() + if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1c67b334563b6831df6910c7a6b9a6ce9f4e645 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36992ef2ca0ee85abec2e0b3067e32694d4f8138 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06ec1c9323cbc7fd6305aee6e999591fdc7c67b0 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34768df8347998b6a26144c28feddb67ca61b008 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..678a3f20bb27a7b1f3b6b483f3179fa1b106990a Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd4e0fc2e48c0a8f1ad65a15c28962c722dabdaa Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py 
b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..e09fb6cc8c5f1523dfbeaef466a5b76bd22c01bb --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py @@ -0,0 +1,34 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.odr', 'odr', + """ + odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, + ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, + taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, + scld=None, work=None, iwork=None, full_output=0) + + Low-level function for ODR. + + See Also + -------- + ODR : The ODR class gathers all information and coordinates the running of the + main fitting routine. + Model : The Model class stores information about the function you wish to fit. + Data : The data to fit. + RealData : Data with weights as actual std. dev.s and/or covariances. + + Notes + ----- + This is a function performing the same operation as the `ODR`, + `Model`, and `Data` classes together. The parameters of this + function are explained in the class documentation. + + """) + +add_newdoc('scipy.odr.__odrpack', '_set_exceptions', + """ + _set_exceptions(odr_error, odr_stop) + + Internal function: set exception classes. + + """) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_models.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_models.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a8d2275dcc4698a9ea61be5871d62069be2599 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_models.py @@ -0,0 +1,315 @@ +""" Collection of Model instances for use with the odrpack fitting package. +""" +import numpy as np +from scipy.odr._odrpack import Model + +__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', + 'polynomial'] + + +def _lin_fcn(B, x): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + (x*b).sum(axis=0) + + +def _lin_fjb(B, x): + a = np.ones(x.shape[-1], float) + res = np.concatenate((a, x.ravel())) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _lin_fjd(B, x): + b = B[1:] + b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0) + b.shape = x.shape + return b + + +def _lin_est(data): + # Eh. The answer is analytical, so just return all ones. + # Don't return zeros since that will interfere with + # ODRPACK's auto-scaling procedures. + + if len(data.x.shape) == 2: + m = data.x.shape[0] + else: + m = 1 + + return np.ones((m + 1,), float) + + +def _poly_fcn(B, x, powers): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + np.sum(b * np.power(x, powers), axis=0) + + +def _poly_fjacb(B, x, powers): + res = np.concatenate((np.ones(x.shape[-1], float), + np.power(x, powers).flat)) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _poly_fjacd(B, x, powers): + b = B[1:] + b.shape = (b.shape[0], 1) + + b = b * powers + + return np.sum(b * np.power(x, powers-1), axis=0) + + +def _exp_fcn(B, x): + return B[0] + np.exp(B[1] * x) + + +def _exp_fjd(B, x): + return B[1] * np.exp(B[1] * x) + + +def _exp_fjb(B, x): + res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) + res.shape = (2, x.shape[-1]) + return res + + +def _exp_est(data): + # Eh. 
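+    # A nonzero constant is sufficient here; as with `_lin_est` above,
+    # zeros would interfere with ODRPACK's auto-scaling.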
+    return np.array([1., 1.])
+
+
+class _MultilinearModel(Model):
+    r"""
+    Arbitrary-dimensional linear model
+
+    This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with an arbitrary
+    dimensional linear model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = 10.0 + 5.0 * x
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.multilinear)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [10.  5.]
+
+    """
+
+    def __init__(self):
+        super().__init__(
+            _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
+            meta={'name': 'Arbitrary-dimensional Linear',
+                  'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
+                  'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})
+
+
+multilinear = _MultilinearModel()
+
+
+def polynomial(order):
+    """
+    Factory function for a general polynomial model.
+
+    Parameters
+    ----------
+    order : int or sequence
+        If an integer, it becomes the order of the polynomial to fit. If
+        a sequence of numbers, then these are the explicit powers in the
+        polynomial.
+        A constant term (power 0) is always included, so don't include 0.
+        Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
+
+    Returns
+    -------
+    polynomial : Model instance
+        Model instance.
+
+    Examples
+    --------
+    We can fit input data using orthogonal distance regression (ODR) with
+    a polynomial model:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import odr
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = np.sin(x)
+    >>> poly_model = odr.polynomial(3)  # using third order polynomial model
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, poly_model)
+    >>> output = odr_obj.run()  # running ODR fitting
+    >>> poly = np.poly1d(output.beta[::-1])
+    >>> poly_y = poly(x)
+    >>> plt.plot(x, y, label="input data")
+    >>> plt.plot(x, poly_y, label="polynomial ODR")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+
+    powers = np.asarray(order)
+    if powers.shape == ():
+        # Scalar.
+        powers = np.arange(1, powers + 1)
+
+    powers.shape = (len(powers), 1)
+    len_beta = len(powers) + 1
+
+    def _poly_est(data, len_beta=len_beta):
+        # Eh. Ignore data and return all ones.
+        return np.ones((len_beta,), float)
+
+    return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
+                 estimate=_poly_est, extra_args=(powers,),
+                 meta={'name': 'Sorta-general Polynomial',
+                       'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
+                       'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
+                                 (len_beta-1)})
+
+
+class _ExponentialModel(Model):
+    r"""
+    Exponential model
+
+    This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with an exponential model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = -10.0 + np.exp(0.5*x)
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.exponential)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [-10.    0.5]
+
+    """
+
+    def __init__(self):
+        super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
+                         estimate=_exp_est,
+                         meta={'name': 'Exponential',
+                               'equ': 'y = B_0 + exp(B_1 * x)',
+                               'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})
+
+
+exponential = _ExponentialModel()
+
+
+def _unilin(B, x):
+    return x*B[0] + B[1]
+
+
+def _unilin_fjd(B, x):
+    return np.ones(x.shape, float) * B[0]
+
+
+def _unilin_fjb(B, x):
+    _ret = np.concatenate((x, np.ones(x.shape, float)))
+    _ret.shape = (2,) + x.shape
+
+    return _ret
+
+
+def _unilin_est(data):
+    return (1., 1.)
+
+
+def _quadratic(B, x):
+    return x*(x*B[0] + B[1]) + B[2]
+
+
+def _quad_fjd(B, x):
+    return 2*x*B[0] + B[1]
+
+
+def _quad_fjb(B, x):
+    _ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
+    _ret.shape = (3,) + x.shape
+
+    return _ret
+
+
+def _quad_est(data):
+    return (1., 1., 1.)
+
+
+class _UnilinearModel(Model):
+    r"""
+    Univariate linear model
+
+    This model is defined by :math:`y = \beta_0 x + \beta_1`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with a unilinear model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = 1.0 * x + 2.0
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.unilinear)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [1. 2.]
+
+    """
+
+    def __init__(self):
+        super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
+                         estimate=_unilin_est,
+                         meta={'name': 'Univariate Linear',
+                               'equ': 'y = B_0 * x + B_1',
+                               'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
+
+
+unilinear = _UnilinearModel()
+
+
+class _QuadraticModel(Model):
+    r"""
+    Quadratic model
+
+    This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with a quadratic model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.quadratic)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [1. 2. 3.]
+
+    """
+
+    def __init__(self):
+        super().__init__(
+            _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est,
+            meta={'name': 'Quadratic',
+                  'equ': 'y = B_0*x**2 + B_1*x + B_2',
+                  'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
+
+
+quadratic = _QuadraticModel()
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_odrpack.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_odrpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..609c2c77835befa7e2edbc35357a8eef05c2d55c
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/_odrpack.py
@@ -0,0 +1,1151 @@
+"""
+Python wrappers for Orthogonal Distance Regression (ODRPACK).
+
+Notes
+=====
+
+* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
+  array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
+  NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
+  efficiency and convenience, the input and output arrays of the fitting
+  function (and its Jacobians) are passed to FORTRAN without transposition.
+  Therefore, where the ODRPACK documentation says that the X array is of shape
+  (N, M), it will be passed to the Python function as an array of shape (M, N).
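+  (For example, with M = 2 input dimensions and N = 100 observations, the x
+  array handed to your Python function has shape (2, 100).)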
+ If M==1, the 1-D case, then nothing matters; if M>1, then your + Python functions will be dealing with arrays that are indexed in reverse of + the ODRPACK documentation. No real issue, but watch out for your indexing of + the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth + observation will be returned as jacd[j, i, n]. Except for the Jacobians, it + really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course, + you can always use the transpose() function from SciPy explicitly. + +* Examples -- See the accompanying file test/test.py for examples of how to set + up fits of your own. Some are taken from the User's Guide; some are from + other sources. + +* Models -- Some common models are instantiated in the accompanying module + models.py . Contributions are welcome. + +Credits +======= + +* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs. + +Robert Kern +robert.kern@gmail.com + +""" +import os + +import numpy as np +from warnings import warn +from scipy.odr import __odrpack + +__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop'] + +odr = __odrpack.odr + + +class OdrWarning(UserWarning): + """ + Warning indicating that the data passed into + ODR will cause problems when passed into 'odr' + that the user should be aware of. + """ + pass + + +class OdrError(Exception): + """ + Exception indicating an error in fitting. + + This is raised by `~scipy.odr.odr` if an error occurs during fitting. + """ + pass + + +class OdrStop(Exception): + """ + Exception stopping fitting. + + You can raise this exception in your objective function to tell + `~scipy.odr.odr` to stop fitting. + """ + pass + + +# Backwards compatibility +odr_error = OdrError +odr_stop = OdrStop + +__odrpack._set_exceptions(OdrError, OdrStop) + + +def _conv(obj, dtype=None): + """ Convert an object to the preferred form for input to the odr routine. + """ + + if obj is None: + return obj + else: + if dtype is None: + obj = np.asarray(obj) + else: + obj = np.asarray(obj, dtype) + if obj.shape == (): + # Scalar. + return obj.dtype.type(obj) + else: + return obj + + +def _report_error(info): + """ Interprets the return code of the odr routine. + + Parameters + ---------- + info : int + The return code of the odr routine. + + Returns + ------- + problems : list(str) + A list of messages about why the odr() routine stopped. 
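+
+    Examples
+    --------
+    A return code below 5 decodes to the stopping reason alone; codes of 5
+    or more are unpacked digit by digit by the logic below.
+
+    >>> _report_error(2)
+    ['Parameter convergence']
+    >>> _report_error(4)
+    ['Iteration limit reached']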
+ """ + + stopreason = ('Blank', + 'Sum of squares convergence', + 'Parameter convergence', + 'Both sum of squares and parameter convergence', + 'Iteration limit reached')[info % 5] + + if info >= 5: + # questionable results or fatal error + + I = (info//10000 % 10, + info//1000 % 10, + info//100 % 10, + info//10 % 10, + info % 10) + problems = [] + + if I[0] == 0: + if I[1] != 0: + problems.append('Derivatives possibly not correct') + if I[2] != 0: + problems.append('Error occurred in callback') + if I[3] != 0: + problems.append('Problem is not full rank at solution') + problems.append(stopreason) + elif I[0] == 1: + if I[1] != 0: + problems.append('N < 1') + if I[2] != 0: + problems.append('M < 1') + if I[3] != 0: + problems.append('NP < 1 or NP > N') + if I[4] != 0: + problems.append('NQ < 1') + elif I[0] == 2: + if I[1] != 0: + problems.append('LDY and/or LDX incorrect') + if I[2] != 0: + problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect') + if I[3] != 0: + problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect') + if I[4] != 0: + problems.append('LWORK and/or LIWORK too small') + elif I[0] == 3: + if I[1] != 0: + problems.append('STPB and/or STPD incorrect') + if I[2] != 0: + problems.append('SCLB and/or SCLD incorrect') + if I[3] != 0: + problems.append('WE incorrect') + if I[4] != 0: + problems.append('WD incorrect') + elif I[0] == 4: + problems.append('Error in derivatives') + elif I[0] == 5: + problems.append('Error occurred in callback') + elif I[0] == 6: + problems.append('Numerical error detected') + + return problems + + else: + return [stopreason] + + +class Data: + """ + The data to fit. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + we : array_like, optional + If `we` is a scalar, then that value is used for all data points (and + all dimensions of the response variable). + If `we` is a rank-1 array of length q (the dimensionality of the + response variable), then this vector is the diagonal of the covariant + weighting matrix for all data points. + If `we` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the i'th response variable + observation (single-dimensional only). + If `we` is a rank-2 array of shape (q, q), then this is the full + covariant weighting matrix broadcast to each observation. + If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the + diagonal of the covariant weighting matrix for the i'th observation. + If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + If the fit is implicit, then only a positive scalar value is used. + wd : array_like, optional + If `wd` is a scalar, then that value is used for all data points + (and all dimensions of the input variable). If `wd` = 0, then the + covariant weighting matrix for each observation is set to the identity + matrix (so each dimension of each observation has the same weight). + If `wd` is a rank-1 array of length m (the dimensionality of the input + variable), then this vector is the diagonal of the covariant weighting + matrix for all data points. 
+ If `wd` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the ith input variable observation + (single-dimensional only). + If `wd` is a rank-2 array of shape (m, m), then this is the full + covariant weighting matrix broadcast to each observation. + If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the + diagonal of the covariant weighting matrix for the ith observation. + If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + fix : array_like of ints, optional + The `fix` argument is the same as ifixx in the class ODR. It is an + array of integers with the same shape as data.x that determines which + input observations are treated as fixed. One can use a sequence of + length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + Each argument is attached to the member of the instance of the same name. + The structures of `x` and `y` are described in the Model class docstring. + If `y` is an integer, then the Data instance can only be used to fit with + implicit models where the dimensionality of the response is equal to the + specified value of `y`. + + The `we` argument weights the effect a deviation in the response variable + has on the fit. The `wd` argument weights the effect a deviation in the + input variable has on the fit. To handle multidimensional inputs and + responses easily, the structure of these arguments has the n'th + dimensional axis first. These arguments heavily use the structured + arguments feature of ODRPACK to conveniently and flexibly support all + options. See the ODRPACK User's Guide for a full explanation of how these + weights are used in the algorithm. Basically, a higher value of the weight + for a particular data point makes a deviation at that point more + detrimental to the fit. + + """ + + def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None): + self.x = _conv(x) + + if not isinstance(self.x, np.ndarray): + raise ValueError("Expected an 'ndarray' of data for 'x', " + f"but instead got data of type '{type(self.x).__name__}'") + + self.y = _conv(y) + self.we = _conv(we) + self.wd = _conv(wd) + self.fix = _conv(fix) + self.meta = {} if meta is None else meta + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + by keywords. + + Examples + -------- + :: + + data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata dictionary. + """ + if attr != "meta" and attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class RealData(Data): + """ + The data, with weightings as actual standard deviations and/or + covariances. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + sx : array_like, optional + Standard deviations of `x`. + `sx` are standard deviations of `x` and are converted to weights by + dividing 1.0 by their squares. 
+ sy : array_like, optional + Standard deviations of `y`. + `sy` are standard deviations of `y` and are converted to weights by + dividing 1.0 by their squares. + covx : array_like, optional + Covariance of `x` + `covx` is an array of covariance matrices of `x` and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + covy : array_like, optional + Covariance of `y` + `covy` is an array of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + fix : array_like, optional + The argument and member fix is the same as Data.fix and ODR.ifixx: + It is an array of integers with the same shape as `x` that + determines which input observations are treated as fixed. One can + use a sequence of length m (the dimensionality of the input + observations) to fix some dimensions for all observations. A value + of 0 fixes the observation, a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + The weights `wd` and `we` are computed from provided values as follows: + + `sx` and `sy` are converted to weights by dividing 1.0 by their squares. + For example, ``wd = 1./np.power(`sx`, 2)``. + + `covx` and `covy` are arrays of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's covariance + matrix. For example, ``we[i] = np.linalg.inv(covy[i])``. + + These arguments follow the same structured argument conventions as wd and + we only restricted by their natures: `sx` and `sy` can't be rank-3, but + `covx` and `covy` can be. + + Only set *either* `sx` or `covx` (not both). Setting both will raise an + exception. Same with `sy` and `covy`. + + """ + + def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None, + fix=None, meta=None): + if (sx is not None) and (covx is not None): + raise ValueError("cannot set both sx and covx") + if (sy is not None) and (covy is not None): + raise ValueError("cannot set both sy and covy") + + # Set flags for __getattr__ + self._ga_flags = {} + if sx is not None: + self._ga_flags['wd'] = 'sx' + else: + self._ga_flags['wd'] = 'covx' + if sy is not None: + self._ga_flags['we'] = 'sy' + else: + self._ga_flags['we'] = 'covy' + + self.x = _conv(x) + + if not isinstance(self.x, np.ndarray): + raise ValueError("Expected an 'ndarray' of data for 'x', " + f"but instead got data of type '{type(self.x).__name__}'") + + self.y = _conv(y) + self.sx = _conv(sx) + self.sy = _conv(sy) + self.covx = _conv(covx) + self.covy = _conv(covy) + self.fix = _conv(fix) + self.meta = {} if meta is None else meta + + def _sd2wt(self, sd): + """ Convert standard deviation to weights. + """ + + return 1./np.power(sd, 2) + + def _cov2wt(self, cov): + """ Convert covariance matrix(-ices) to weights. 
+ """ + + from scipy.linalg import inv + + if len(cov.shape) == 2: + return inv(cov) + else: + weights = np.zeros(cov.shape, float) + + for i in range(cov.shape[-1]): # n + weights[:,:,i] = inv(cov[:,:,i]) + + return weights + + def __getattr__(self, attr): + + if attr not in ('wd', 'we'): + if attr != "meta" and attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + else: + lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx), + ('wd', 'covx'): (self._cov2wt, self.covx), + ('we', 'sy'): (self._sd2wt, self.sy), + ('we', 'covy'): (self._cov2wt, self.covy)} + + func, arg = lookup_tbl[(attr, self._ga_flags[attr])] + + if arg is not None: + return func(*(arg,)) + else: + return None + + +class Model: + """ + The Model class stores information about the function you wish to fit. + + It stores the function itself, at the least, and optionally stores + functions which compute the Jacobians used during fitting. Also, one + can provide a function that will provide reasonable starting values + for the fit parameters possibly given the set of data. + + Parameters + ---------- + fcn : function + fcn(beta, x) --> y + fjacb : function + Jacobian of fcn wrt the fit parameters beta. + + fjacb(beta, x) --> @f_i(x,B)/@B_j + fjacd : function + Jacobian of fcn wrt the (possibly multidimensional) input + variable. + + fjacd(beta, x) --> @f_i(x,B)/@x_j + extra_args : tuple, optional + If specified, `extra_args` should be a tuple of extra + arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called + by `apply(fcn, (beta, x) + extra_args)` + estimate : array_like of rank-1 + Provides estimates of the fit parameters from the data + + estimate(data) --> estbeta + implicit : boolean + If TRUE, specifies that the model + is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit + against + meta : dict, optional + freeform dictionary of metadata for the model + + Notes + ----- + Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and + return a NumPy array. The `estimate` object takes an instance of the + Data class. + + Here are the rules for the shapes of the argument and return + arrays of the callback functions: + + `x` + if the input data is single-dimensional, then `x` is rank-1 + array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)`` + If the input data is multi-dimensional, then `x` is a rank-2 array; + i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``. + In all cases, it has the same shape as the input data array passed to + `~scipy.odr.odr`. `m` is the dimensionality of the input data, + `n` is the number of observations. + `y` + if the response variable is single-dimensional, then `y` is a + rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``. + If the response variable is multi-dimensional, then `y` is a rank-2 + array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape = + (q, n)`` where `q` is the dimensionality of the response variable. + `beta` + rank-1 array of length `p` where `p` is the number of parameters; + i.e. ``beta = array([B_1, B_2, ..., B_p])`` + `fjacb` + if the response variable is multi-dimensional, then the + return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] = + d f_l(X,B)/d B_k`` evaluated at the ith data point. If `q == 1`, then + the return array is only rank-2 and with shape `(p, n)`. + `fjacd` + as with fjacb, only the return array's shape is `(q, m, n)` + such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data + point. 
If `q == 1`, then the return array's shape is `(m, n)`. If
+        `m == 1`, the shape is `(q, n)`. If `m == q == 1`, the shape is `(n,)`.
+
+    """
+
+    def __init__(self, fcn, fjacb=None, fjacd=None,
+                 extra_args=None, estimate=None, implicit=0, meta=None):
+
+        self.fcn = fcn
+        self.fjacb = fjacb
+        self.fjacd = fjacd
+
+        if extra_args is not None:
+            extra_args = tuple(extra_args)
+
+        self.extra_args = extra_args
+        self.estimate = estimate
+        self.implicit = implicit
+        self.meta = meta if meta is not None else {}
+
+    def set_meta(self, **kwds):
+        """ Update the metadata dictionary with the keywords and data provided
+        here.
+
+        Examples
+        --------
+        set_meta(name="Exponential", equation="y = a exp(b x) + c")
+        """
+
+        self.meta.update(kwds)
+
+    def __getattr__(self, attr):
+        """ Dispatch attribute access to the metadata.
+        """
+
+        if attr != "meta" and attr in self.meta:
+            return self.meta[attr]
+        else:
+            raise AttributeError("'%s' not in metadata" % attr)
+
+
+class Output:
+    """
+    The Output class stores the output of an ODR run.
+
+    Attributes
+    ----------
+    beta : ndarray
+        Estimated parameter values, of shape (p,).
+    sd_beta : ndarray
+        Standard deviations of the estimated parameters, of shape (p,).
+    cov_beta : ndarray
+        Covariance matrix of the estimated parameters, of shape (p,p).
+        Note that this `cov_beta` is not scaled by the residual variance
+        `res_var`, whereas `sd_beta` is. This means
+        ``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same
+        result as `output.sd_beta`.
+    delta : ndarray, optional
+        Array of estimated errors in input variables, of same shape as `x`.
+    eps : ndarray, optional
+        Array of estimated errors in response variables, of same shape as `y`.
+    xplus : ndarray, optional
+        Array of ``x + delta``.
+    y : ndarray, optional
+        Array ``y = fcn(x + delta)``.
+    res_var : float, optional
+        Residual variance.
+    sum_square : float, optional
+        Sum of squares error.
+    sum_square_delta : float, optional
+        Sum of squares of delta error.
+    sum_square_eps : float, optional
+        Sum of squares of eps error.
+    inv_condnum : float, optional
+        Inverse condition number (cf. ODRPACK UG p. 77).
+    rel_error : float, optional
+        Relative error in function values computed within fcn.
+    work : ndarray, optional
+        Final work array.
+    work_ind : dict, optional
+        Indices into work for drawing out values (cf. ODRPACK UG p. 83).
+    info : int, optional
+        Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
+    stopreason : list of str, optional
+        `info` interpreted into English.
+
+    Notes
+    -----
+    Takes one argument for initialization, the return value from the
+    function `~scipy.odr.odr`. The attributes listed as "optional" above are
+    only present if `~scipy.odr.odr` was run with ``full_output=1``.
+
+    """
+
+    def __init__(self, output):
+        self.beta = output[0]
+        self.sd_beta = output[1]
+        self.cov_beta = output[2]
+
+        if len(output) == 4:
+            # full output
+            self.__dict__.update(output[3])
+            self.stopreason = _report_error(self.info)
+
+    def pprint(self):
+        """ Pretty-print important results.
+        """
+
+        print('Beta:', self.beta)
+        print('Beta Std Error:', self.sd_beta)
+        print('Beta Covariance:', self.cov_beta)
+        if hasattr(self, 'info'):
+            print('Residual Variance:', self.res_var)
+            print('Inverse Condition #:', self.inv_condnum)
+            print('Reason(s) for Halting:')
+            for r in self.stopreason:
+                print('  %s' % r)
+
+
+class ODR:
+    """
+    The ODR class gathers all information and coordinates the running of the
+    main fitting routine.
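To illustrate the `cov_beta`/`sd_beta` relationship noted in the `Output` docstring above, here is a minimal end-to-end sketch; the model, data, and seed are invented for the example:

```python
import numpy as np
from scipy.odr import Data, Model, ODR

def affine(beta, x):
    return beta[0] * x + beta[1]

rng = np.random.default_rng(0)
x = np.linspace(0.0, 5.0, 20)
y = 2.0 * x + 1.0 + rng.normal(scale=0.1, size=x.shape)

out = ODR(Data(x, y), Model(affine), beta0=[1.0, 1.0]).run()
# sd_beta equals the square root of the res_var-scaled diagonal of cov_beta.
print(np.allclose(out.sd_beta,
                  np.sqrt(np.diag(out.cov_beta * out.res_var))))   # True
out.pprint()
```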
+ + Members of instances of the ODR class have the same names as the arguments + to the initialization routine. + + Parameters + ---------- + data : Data class instance + instance of the Data class + model : Model class instance + instance of the Model class + + Other Parameters + ---------------- + beta0 : array_like of rank-1 + a rank-1 sequence of initial parameter values. Optional if + model provides an "estimate" function to estimate these values. + delta0 : array_like of floats of rank-1, optional + a (double-precision) float array to hold the initial values of + the errors in the input variables. Must be same shape as data.x + ifixb : array_like of ints of rank-1, optional + sequence of integers with the same length as beta0 that determines + which parameters are held fixed. A value of 0 fixes the parameter, + a value > 0 makes the parameter free. + ifixx : array_like of ints with same shape as data.x, optional + an array of integers with the same shape as data.x that determines + which input observations are treated as fixed. One can use a sequence + of length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + job : int, optional + an integer telling ODRPACK what tasks to perform. See p. 31 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_job post-initialization for a more readable interface. + iprint : int, optional + an integer telling ODRPACK what to print. See pp. 33-34 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_iprint post-initialization for a more readable interface. + errfile : str, optional + string with the filename to print ODRPACK errors to. If the file already + exists, an error will be thrown. The `overwrite` argument can be used to + prevent this. *Do Not Open This File Yourself!* + rptfile : str, optional + string with the filename to print ODRPACK summaries to. If the file + already exists, an error will be thrown. The `overwrite` argument can be + used to prevent this. *Do Not Open This File Yourself!* + ndigit : int, optional + integer specifying the number of reliable digits in the computation + of the function. + taufac : float, optional + float specifying the initial trust region. The default value is 1. + The initial trust region is equal to taufac times the length of the + first computed Gauss-Newton step. taufac must be less than 1. + sstol : float, optional + float specifying the tolerance for convergence based on the relative + change in the sum-of-squares. The default value is eps**(1/2) where eps + is the smallest value such that 1 + eps > 1 for double precision + computation on the machine. sstol must be less than 1. + partol : float, optional + float specifying the tolerance for convergence based on the relative + change in the estimated parameters. The default value is eps**(2/3) for + explicit models and ``eps**(1/3)`` for implicit models. partol must be less + than 1. + maxit : int, optional + integer specifying the maximum number of iterations to perform. For + first runs, maxit is the total number of iterations performed and + defaults to 50. For restarts, maxit is the number of additional + iterations to perform and defaults to 10. + stpb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute + finite difference derivatives wrt the parameters. 
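Among the parameters described above, `ifixb` is easiest to see in action. A short sketch with invented data, holding the intercept fixed at its `beta0` value:

```python
import numpy as np
from scipy.odr import Data, Model, ODR

def affine(beta, x):
    return beta[0] * x + beta[1]

x = np.linspace(0.0, 5.0, 20)
y = 2.0 * x + 0.5

# In ifixb, 0 fixes a parameter at its beta0 value; > 0 leaves it free.
out = ODR(Data(x, y), Model(affine), beta0=[1.0, 0.5],
          ifixb=[1, 0], maxit=50).run()
print(out.beta)   # slope is fitted; intercept stays at 0.5
```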
+    stpd : array_like, optional
+        array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
+        step sizes to compute finite difference derivatives wrt the input
+        variable errors. If stpd is a rank-1 array with length m (the
+        dimensionality of the input variable), then the values are broadcast to
+        all observations.
+    sclb : array_like, optional
+        sequence (``len(sclb) == len(beta0)``) of scaling factors for the
+        parameters. The purpose of these scaling factors is to scale all of
+        the parameters to around unity. Normally appropriate scaling factors
+        are computed if this argument is not specified. Specify them yourself
+        if the automatic procedure goes awry.
+    scld : array_like, optional
+        array (``scld.shape == data.x.shape`` or ``scld.shape == (m,)``) of scaling
+        factors for the *errors* in the input variables. Again, these factors
+        are automatically computed if you do not provide them. If
+        ``scld.shape == (m,)``, then the scaling factors are broadcast to all
+        observations.
+    work : ndarray, optional
+        array to hold the double-valued working data for ODRPACK. When
+        restarting, takes the value of self.output.work.
+    iwork : ndarray, optional
+        array to hold the integer-valued working data for ODRPACK. When
+        restarting, takes the value of self.output.iwork.
+    overwrite : bool, optional
+        If True, output files defined by `errfile` and `rptfile` are
+        overwritten. The default is False.
+
+    Attributes
+    ----------
+    data : Data
+        The data for this fit
+    model : Model
+        The model used in the fit
+    output : Output
+        An instance of the Output class containing all of the returned
+        data from an invocation of ODR.run() or ODR.restart()
+
+    """
+
+    def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
+                 ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
+                 ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
+                 stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None,
+                 overwrite=False):
+
+        self.data = data
+        self.model = model
+
+        if beta0 is None:
+            if self.model.estimate is not None:
+                self.beta0 = _conv(self.model.estimate(self.data))
+            else:
+                raise ValueError(
+                    "must specify beta0 or provide an estimator with the model"
+                )
+        else:
+            self.beta0 = _conv(beta0)
+
+        if ifixx is None and data.fix is not None:
+            ifixx = data.fix
+
+        if overwrite:
+            # remove output files for overwriting.
+            if rptfile is not None and os.path.exists(rptfile):
+                os.remove(rptfile)
+            if errfile is not None and os.path.exists(errfile):
+                os.remove(errfile)
+
+        self.delta0 = _conv(delta0)
+        # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
+        # platforms.
+        # XXX: some other FORTRAN compilers may not agree.
+        self.ifixx = _conv(ifixx, dtype=np.int32)
+        self.ifixb = _conv(ifixb, dtype=np.int32)
+        self.job = job
+        self.iprint = iprint
+        self.errfile = errfile
+        self.rptfile = rptfile
+        self.ndigit = ndigit
+        self.taufac = taufac
+        self.sstol = sstol
+        self.partol = partol
+        self.maxit = maxit
+        self.stpb = _conv(stpb)
+        self.stpd = _conv(stpd)
+        self.sclb = _conv(sclb)
+        self.scld = _conv(scld)
+        self.work = _conv(work)
+        self.iwork = _conv(iwork)
+
+        self.output = None
+
+        self._check()
+
+    def _check(self):
+        """ Check the inputs for consistency, but don't bother checking things
+        that the builtin function odr will check.
+ """ + + x_s = list(self.data.x.shape) + + if isinstance(self.data.y, np.ndarray): + y_s = list(self.data.y.shape) + if self.model.implicit: + raise OdrError("an implicit model cannot use response data") + else: + # implicit model with q == self.data.y + y_s = [self.data.y, x_s[-1]] + if not self.model.implicit: + raise OdrError("an explicit model needs response data") + self.set_job(fit_type=1) + + if x_s[-1] != y_s[-1]: + raise OdrError("number of observations do not match") + + n = x_s[-1] + + if len(x_s) == 2: + m = x_s[0] + else: + m = 1 + if len(y_s) == 2: + q = y_s[0] + else: + q = 1 + + p = len(self.beta0) + + # permissible output array shapes + + fcn_perms = [(q, n)] + fjacd_perms = [(q, m, n)] + fjacb_perms = [(q, p, n)] + + if q == 1: + fcn_perms.append((n,)) + fjacd_perms.append((m, n)) + fjacb_perms.append((p, n)) + if m == 1: + fjacd_perms.append((q, n)) + if p == 1: + fjacb_perms.append((q, n)) + if m == q == 1: + fjacd_perms.append((n,)) + if p == q == 1: + fjacb_perms.append((n,)) + + # try evaluating the supplied functions to make sure they provide + # sensible outputs + + arglist = (self.beta0, self.data.x) + if self.model.extra_args is not None: + arglist = arglist + self.model.extra_args + res = self.model.fcn(*arglist) + + if res.shape not in fcn_perms: + print(res.shape) + print(fcn_perms) + raise OdrError("fcn does not output %s-shaped array" % y_s) + + if self.model.fjacd is not None: + res = self.model.fjacd(*arglist) + if res.shape not in fjacd_perms: + raise OdrError( + "fjacd does not output %s-shaped array" % repr((q, m, n))) + if self.model.fjacb is not None: + res = self.model.fjacb(*arglist) + if res.shape not in fjacb_perms: + raise OdrError( + "fjacb does not output %s-shaped array" % repr((q, p, n))) + + # check shape of delta0 + + if self.delta0 is not None and self.delta0.shape != self.data.x.shape: + raise OdrError( + "delta0 is not a %s-shaped array" % repr(self.data.x.shape)) + + if self.data.x.size == 0: + warn("Empty data detected for ODR instance. " + "Do not expect any fitting to occur", + OdrWarning, stacklevel=3) + + def _gen_work(self): + """ Generate a suitable work array if one does not already exist. + """ + + n = self.data.x.shape[-1] + p = self.beta0.shape[0] + + if len(self.data.x.shape) == 2: + m = self.data.x.shape[0] + else: + m = 1 + + if self.model.implicit: + q = self.data.y + elif len(self.data.y.shape) == 2: + q = self.data.y.shape[0] + else: + q = 1 + + if self.data.we is None: + ldwe = ld2we = 1 + elif len(self.data.we.shape) == 3: + ld2we, ldwe = self.data.we.shape[1:] + else: + we = self.data.we + ldwe = 1 + ld2we = 1 + if we.ndim == 1 and q == 1: + ldwe = n + elif we.ndim == 2: + if we.shape == (q, q): + ld2we = q + elif we.shape == (q, n): + ldwe = n + + if self.job % 10 < 2: + # ODR not OLS + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) + else: + # OLS not ODR + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + + 5*q + q*(p+m) + ldwe*ld2we*q) + + if isinstance(self.work, np.ndarray) and self.work.shape == (lwork,)\ + and self.work.dtype.str.endswith('f8'): + # the existing array is fine + return + else: + self.work = np.zeros((lwork,), float) + + def set_job(self, fit_type=None, deriv=None, var_calc=None, + del_init=None, restart=None): + """ + Sets the "job" parameter is a hopefully comprehensible way. + + If an argument is not specified, then the value is left as is. 
The + default value from class initialization is for all of these options set + to 0. + + Parameters + ---------- + fit_type : {0, 1, 2} int + 0 -> explicit ODR + + 1 -> implicit ODR + + 2 -> ordinary least-squares + deriv : {0, 1, 2, 3} int + 0 -> forward finite differences + + 1 -> central finite differences + + 2 -> user-supplied derivatives (Jacobians) with results + checked by ODRPACK + + 3 -> user-supplied derivatives, no checking + var_calc : {0, 1, 2} int + 0 -> calculate asymptotic covariance matrix and fit + parameter uncertainties (V_B, s_B) using derivatives + recomputed at the final solution + + 1 -> calculate V_B and s_B using derivatives from last iteration + + 2 -> do not calculate V_B and s_B + del_init : {0, 1} int + 0 -> initial input variable offsets set to 0 + + 1 -> initial offsets provided by user in variable "work" + restart : {0, 1} int + 0 -> fit is not a restart + + 1 -> fit is a restart + + Notes + ----- + The permissible values are different from those given on pg. 31 of the + ODRPACK User's Guide only in that one cannot specify numbers greater than + the last value for each variable. + + If one does not supply functions to compute the Jacobians, the fitting + procedure will change deriv to 0, finite differences, as a default. To + initialize the input variable offsets by yourself, set del_init to 1 and + put the offsets into the "work" variable correctly. + + """ + + if self.job is None: + job_l = [0, 0, 0, 0, 0] + else: + job_l = [self.job // 10000 % 10, + self.job // 1000 % 10, + self.job // 100 % 10, + self.job // 10 % 10, + self.job % 10] + + if fit_type in (0, 1, 2): + job_l[4] = fit_type + if deriv in (0, 1, 2, 3): + job_l[3] = deriv + if var_calc in (0, 1, 2): + job_l[2] = var_calc + if del_init in (0, 1): + job_l[1] = del_init + if restart in (0, 1): + job_l[0] = restart + + self.job = (job_l[0]*10000 + job_l[1]*1000 + + job_l[2]*100 + job_l[3]*10 + job_l[4]) + + def set_iprint(self, init=None, so_init=None, + iter=None, so_iter=None, iter_step=None, final=None, so_final=None): + """ Set the iprint parameter for the printing of computation reports. + + If any of the arguments are specified here, then they are set in the + iprint member. If iprint is not set manually or with this method, then + ODRPACK defaults to no printing. If no filename is specified with the + member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to + print to stdout in addition to the specified filename by setting the + so_* arguments to this function, but one cannot specify to print to + stdout but not a file since one can do that by not specifying a rptfile + filename. + + There are three reports: initialization, iteration, and final reports. + They are represented by the arguments init, iter, and final + respectively. The permissible values are 0, 1, and 2 representing "no + report", "short report", and "long report" respectively. + + The argument iter_step (0 <= iter_step <= 9) specifies how often to make + the iteration report; the report will be made for every iter_step'th + iteration starting with iteration one. If iter_step == 0, then no + iteration report is made, regardless of the other arguments. + + If the rptfile is None, then any so_* arguments supplied will raise an + exception. 
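A small sketch of the digit packing that `set_job` performs; the model and data below are invented placeholders:

```python
from scipy.odr import Data, Model, ODR

def f(beta, x):
    return beta[0] * x

odr_obj = ODR(Data([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]), Model(f), beta0=[1.0])

# Digits pack as restart, del_init, var_calc, deriv, fit_type.
odr_obj.set_job(fit_type=2)    # ordinary least squares
print(odr_obj.job)             # 2
odr_obj.set_job(deriv=3)       # user-supplied derivatives, unchecked
print(odr_obj.job)             # 32 (deriv digit * 10 + fit_type digit)
```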
+ """ + if self.iprint is None: + self.iprint = 0 + + ip = [self.iprint // 1000 % 10, + self.iprint // 100 % 10, + self.iprint // 10 % 10, + self.iprint % 10] + + # make a list to convert iprint digits to/from argument inputs + # rptfile, stdout + ip2arg = [[0, 0], # none, none + [1, 0], # short, none + [2, 0], # long, none + [1, 1], # short, short + [2, 1], # long, short + [1, 2], # short, long + [2, 2]] # long, long + + if (self.rptfile is None and + (so_init is not None or + so_iter is not None or + so_final is not None)): + raise OdrError( + "no rptfile specified, cannot output to stdout twice") + + iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] + + if init is not None: + iprint_l[0] = init + if so_init is not None: + iprint_l[1] = so_init + if iter is not None: + iprint_l[2] = iter + if so_iter is not None: + iprint_l[3] = so_iter + if final is not None: + iprint_l[4] = final + if so_final is not None: + iprint_l[5] = so_final + + if iter_step in range(10): + # 0..9 + ip[2] = iter_step + + ip[0] = ip2arg.index(iprint_l[0:2]) + ip[1] = ip2arg.index(iprint_l[2:4]) + ip[3] = ip2arg.index(iprint_l[4:6]) + + self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] + + def run(self): + """ Run the fitting routine with all of the information given and with ``full_output=1``. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . + """ # noqa: E501 + + args = (self.model.fcn, self.beta0, self.data.y, self.data.x) + kwds = {'full_output': 1} + kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', + 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', + 'stpd', 'sclb', 'scld', 'work', 'iwork'] + + if self.delta0 is not None and (self.job // 10000) % 10 == 0: + # delta0 provided and fit is not a restart + self._gen_work() + + d0 = np.ravel(self.delta0) + + self.work[:len(d0)] = d0 + + # set the kwds from other objects explicitly + if self.model.fjacb is not None: + kwds['fjacb'] = self.model.fjacb + if self.model.fjacd is not None: + kwds['fjacd'] = self.model.fjacd + if self.data.we is not None: + kwds['we'] = self.data.we + if self.data.wd is not None: + kwds['wd'] = self.data.wd + if self.model.extra_args is not None: + kwds['extra_args'] = self.model.extra_args + + # implicitly set kwds from self's members + for attr in kwd_l: + obj = getattr(self, attr) + if obj is not None: + kwds[attr] = obj + + self.output = Output(odr(*args, **kwds)) + + return self.output + + def restart(self, iter=None): + """ Restarts the run with iter more iterations. + + Parameters + ---------- + iter : int, optional + ODRPACK's default for the number of new iterations is 10. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . + """ + + if self.output is None: + raise OdrError("cannot restart: run() has not been called before") + + self.set_job(restart=1) + self.work = self.output.work + self.iwork = self.output.iwork + + self.maxit = iter + + return self.run() diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/models.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/models.py new file mode 100644 index 0000000000000000000000000000000000000000..0289b59747bb68a4954e58732ac69d7df144f5f6 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/models.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+# Use the `scipy.odr` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'Model', 'exponential', 'multilinear', 'unilinear', + 'quadratic', 'polynomial' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="models", + private_modules=["_models"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/odrpack.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..192fb3342b7957703996957c882d44656706e41b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/odrpack.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="odrpack", + private_modules=["_odrpack"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4200dcd12fa459ccb6c7e5656b472a9fc145027c Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d14ca3c88d17e703a34b8b5e5d497f978a74765b Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py new file mode 100644 index 0000000000000000000000000000000000000000..d3aa91595b7100d56bf0d9716017b0ef01a52aa5 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py @@ -0,0 +1,606 @@ +import pickle +import tempfile +import shutil +import os + +import numpy as np +from numpy import pi +from numpy.testing import (assert_array_almost_equal, + 
assert_equal, assert_warns, + assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning, + multilinear, exponential, unilinear, quadratic, + polynomial) + + +class TestODR: + + # Bad Data for 'x' + + def test_bad_data(self): + assert_raises(ValueError, Data, 2, 1) + assert_raises(ValueError, RealData, 2, 1) + + # Empty Data for 'x' + def empty_data_func(self, B, x): + return B[0]*x + B[1] + + def test_empty_data(self): + beta0 = [0.02, 0.0] + linear = Model(self.empty_data_func) + + empty_dat = Data([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + empty_dat = RealData([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + # Explicit Example + + def explicit_fcn(self, B, x): + ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) + return ret + + def explicit_fjd(self, B, x): + eBx = np.exp(B[2]*x) + ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx + return ret + + def explicit_fjb(self, B, x): + eBx = np.exp(B[2]*x) + res = np.vstack([np.ones(x.shape[-1]), + np.power(eBx-1.0, 2), + B[1]*2.0*(eBx-1.0)*eBx*x]) + return res + + def test_explicit(self): + explicit_mod = Model( + self.explicit_fcn, + fjacb=self.explicit_fjb, + fjacd=self.explicit_fjd, + meta=dict(name='Sample Explicit Model', + ref='ODRPACK UG, pg. 39'), + ) + explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], + [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, + 1213.8,1215.5,1212.]) + explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], + ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) + explicit_odr.set_job(deriv=2) + explicit_odr.set_iprint(init=0, iter=0, final=0) + + out = explicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.2646548050648876e+03, -5.4018409956678255e+01, + -8.7849712165253724e-02]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, + -8.0978217468468912e-04], + [-3.7421976890364739e-01, 1.0529686462751804e+00, + -1.9453521827942002e-03], + [-8.0978217468468912e-04, -1.9453521827942002e-03, + 1.6827336938454476e-05]]), + ) + + # Implicit Example + + def implicit_fcn(self, B, x): + return (B[2]*np.power(x[0]-B[0], 2) + + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + + B[4]*np.power(x[1]-B[1], 2) - 1.0) + + def test_implicit(self): + implicit_mod = Model( + self.implicit_fcn, + implicit=1, + meta=dict(name='Sample Implicit Model', + ref='ODRPACK UG, pg. 
49'), + ) + implicit_dat = Data([ + [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, + -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], + [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, + -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], + 1, + ) + implicit_odr = ODR(implicit_dat, implicit_mod, + beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) + + out = implicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, + 0.0162299708984738, 0.0797537982976416]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, + 0.0027500347539902, 0.0034962501532468]), + ) + assert_allclose( + out.cov_beta, + np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, + 7.0263550868344446e-02, -4.7175267373474862e-02, + 5.2515575927380355e-02], + [-1.9437686411979040e+00, 2.0481509222414456e+00, + -6.1600515853057307e-02, 4.6268827806232933e-02, + -5.8822307501391467e-02], + [7.0263550868344446e-02, -6.1600515853057307e-02, + 2.8659542561579308e-03, -1.4628662260014491e-03, + 1.4528860663055824e-03], + [-4.7175267373474862e-02, 4.6268827806232933e-02, + -1.4628662260014491e-03, 1.2855592885514335e-03, + -1.2692942951415293e-03], + [5.2515575927380355e-02, -5.8822307501391467e-02, + 1.4528860663055824e-03, -1.2692942951415293e-03, + 2.0778813389755596e-03]]), + rtol=1e-6, atol=2e-6, + ) + + # Multi-variable Example + + def multi_fcn(self, B, x): + if (x < 0.0).any(): + raise OdrStop + theta = pi*B[3]/2. + ctheta = np.cos(theta) + stheta = np.sin(theta) + omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) + phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) + r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + + np.power(omega*stheta, 2)), -B[4]) + ret = np.vstack([B[1] + r*np.cos(B[4]*phi), + r*np.sin(B[4]*phi)]) + return ret + + def test_multi(self): + multi_mod = Model( + self.multi_fcn, + meta=dict(name='Sample Multi-Response Model', + ref='ODRPACK UG, pg. 
56'), + ) + + multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, + 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, + 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) + multi_y = np.array([ + [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, + 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, + 2.934, 2.876, 2.838, 2.798, 2.759], + [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, + 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, + 0.202, 0.182, 0.168, 0.153, 0.139], + ]) + n = len(multi_x) + multi_we = np.zeros((2, 2, n), dtype=float) + multi_ifixx = np.ones(n, dtype=int) + multi_delta = np.zeros(n, dtype=float) + + multi_we[0,0,:] = 559.6 + multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 + multi_we[1,1,:] = 8397.0 + + for i in range(n): + if multi_x[i] < 100.0: + multi_ifixx[i] = 0 + elif multi_x[i] <= 150.0: + pass # defaults are fine + elif multi_x[i] <= 1000.0: + multi_delta[i] = 25.0 + elif multi_x[i] <= 10000.0: + multi_delta[i] = 560.0 + elif multi_x[i] <= 100000.0: + multi_delta[i] = 9500.0 + else: + multi_delta[i] = 144000.0 + if multi_x[i] == 100.0 or multi_x[i] == 150.0: + multi_we[:,:,i] = 0.0 + + multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), + we=multi_we) + multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], + delta0=multi_delta, ifixx=multi_ifixx) + multi_odr.set_job(deriv=1, del_init=1) + + out = multi_odr.run() + assert_array_almost_equal( + out.beta, + np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978, + 0.5101147161764654, 0.5173902330489161]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757, + 0.0132642749596149, 0.0288529201353984]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406, + -0.0058700836512467, 0.011281212888768], + [0.0036159705923791, 0.0064793789429006, 0.0517610978353126, + -0.0051181304940204, 0.0130726943624117], + [0.0438637051470406, 0.0517610978353126, 0.5182263323095322, + -0.0563083340093696, 0.1269490939468611], + [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, + 0.0066939246261263, -0.0140184391377962], + [0.011281212888768, 0.0130726943624117, 0.1269490939468611, + -0.0140184391377962, 0.0316733013820852]]), + ) + + # Pearson's Data + # K. 
Pearson, Philosophical Magazine, 2, 559 (1901) + + def pearson_fcn(self, B, x): + return B[0] + B[1]*x + + def test_pearson(self): + p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) + p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) + p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) + p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) + + p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) + + # Reverse the data to test invariance of results + pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) + + p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) + + p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) + pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) + + out = p_odr.run() + assert_array_almost_equal( + out.beta, + np.array([5.4767400299231674, -0.4796082367610305]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.3590121690702467, 0.0706291186037444]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0854275622946333, -0.0161807025443155], + [-0.0161807025443155, 0.003306337993922]]), + ) + + rout = pr_odr.run() + assert_array_almost_equal( + rout.beta, + np.array([11.4192022410781231, -2.0850374506165474]), + ) + assert_array_almost_equal( + rout.sd_beta, + np.array([0.9820231665657161, 0.3070515616198911]), + ) + assert_array_almost_equal( + rout.cov_beta, + np.array([[0.6391799462548782, -0.1955657291119177], + [-0.1955657291119177, 0.0624888159223392]]), + ) + + # Lorentz Peak + # The data is taken from one of the undergraduate physics labs I performed. + + def lorentz(self, beta, x): + return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - + beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) + + def test_lorentz(self): + l_sy = np.array([.29]*18) + l_sx = np.array([.000972971,.000948268,.000707632,.000706679, + .000706074, .000703918,.000698955,.000456856, + .000455207,.000662717,.000654619,.000652694, + .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) + + l_dat = RealData( + [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, + 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, + 3.6562, 3.62498, 3.55525, 3.41886], + [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, + 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], + sx=l_sx, + sy=l_sy, + ) + l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) + l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) + + out = l_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.4306780846149925e+03, 1.3390509034538309e-01, + 3.7798193600109009e+00]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([7.3621186811330963e-01, 3.5068899941471650e-04, + 2.4451209281408992e-04]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[2.4714409064597873e-01, -6.9067261911110836e-05, + -3.1236953270424990e-05], + [-6.9067261911110836e-05, 5.6077531517333009e-08, + 3.6133261832722601e-08], + [-3.1236953270424990e-05, 3.6133261832722601e-08, + 2.7261220025171730e-08]]), + ) + + def test_ticket_1253(self): + def linear(c, x): + return c[0]*x+c[1] + + c = [2.0, 3.0] + x = np.linspace(0, 10) + y = linear(c, x) + + model = Model(linear) + data = Data(x, y, wd=1.0, we=1.0) + job = ODR(data, model, beta0=[1.0, 1.0]) + result = job.run() + assert_equal(result.info, 2) + + # Verify fix for gh-9140 + + def test_ifixx(self): + x1 = [-2.01, -0.99, -0.001, 1.02, 1.98] + x2 = [3.98, 1.01, 0.001, 0.998, 4.01] + fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int))) + data = Data(np.vstack((x1, x2)), y=1, 
fix=fix) + model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True) + + odr1 = ODR(data, model, beta0=np.array([1.])) + sol1 = odr1.run() + odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix) + sol2 = odr2.run() + assert_equal(sol1.beta, sol2.beta) + + # verify bugfix for #11800 in #11802 + def test_ticket_11800(self): + # parameters + beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5]) + nr_measurements = 10 + + std_dev_x = 0.01 + x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866, + -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301], + [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829, + 0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]]) + + std_dev_y = 0.05 + y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642, + 0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929], + [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536, + -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]]) + + beta_solution = np.array([ + 2.62920235756665876536e+00, -1.26608484996299608838e+02, + 1.29703572775403074502e+02, -1.88560985401185465804e+00, + 7.83834160771274923718e+01, -7.64124076838087091801e+01]) + + # model's function and Jacobians + def func(beta, x): + y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :] + y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :] + + return np.vstack((y0, y1)) + + def df_dbeta_odr(beta, x): + nr_meas = np.shape(x)[1] + zeros = np.zeros(nr_meas) + ones = np.ones(nr_meas) + + dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros]) + dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]]) + + return np.stack((dy0, dy1)) + + def df_dx_odr(beta, x): + nr_meas = np.shape(x)[1] + ones = np.ones(nr_meas) + + dy0 = np.array([beta[1] * ones, beta[2] * ones]) + dy1 = np.array([beta[4] * ones, beta[5] * ones]) + return np.stack((dy0, dy1)) + + # do measurements with errors in independent and dependent variables + x0_true = np.linspace(1, 10, nr_measurements) + x1_true = np.linspace(1, 10, nr_measurements) + x_true = np.array([x0_true, x1_true]) + + y_true = func(beta_true, x_true) + + x_meas = x_true + x_error + y_meas = y_true + y_error + + # estimate model's parameters + model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr) + + data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y) + + odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100) + #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1) + odr_obj.set_job(deriv=3) + + odr_out = odr_obj.run() + + # check results + assert_equal(odr_out.info, 1) + assert_array_almost_equal(odr_out.beta, beta_solution) + + def test_multilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 10.0 + 5.0 * x + data = Data(x, y) + odr_obj = ODR(data, multilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [10.0, 5.0]) + + def test_exponential_model(self): + x = np.linspace(0.0, 5.0) + y = -10.0 + np.exp(0.5*x) + data = Data(x, y) + odr_obj = ODR(data, exponential) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [-10.0, 0.5]) + + def test_polynomial_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3 + poly_model = polynomial(3) + data = Data(x, y) + odr_obj = ODR(data, poly_model) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0]) + + def test_unilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + data = Data(x, y) + odr_obj = 
ODR(data, unilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0]) + + def test_quadratic_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x ** 2 + 2.0 * x + 3.0 + data = Data(x, y) + odr_obj = ODR(data, quadratic) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0]) + + def test_work_ind(self): + + def func(par, x): + b0, b1 = par + return b0 + b1 * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + # do the fitting + linear_model = Model(func) + real_data = RealData(x, y, sx=x_err, sy=y_err) + odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4]) + odr_obj.set_job(fit_type=0) + out = odr_obj.run() + + sd_ind = out.work_ind['sd'] + assert_array_almost_equal(out.sd_beta, + out.work[sd_ind:sd_ind + len(out.sd_beta)]) + + @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better " + "not to run this test, see gh-13127") + def test_output_file_overwrite(self): + """ + Verify fix for gh-1892 + """ + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + data = Data(np.arange(10), 12 * np.arange(10)) + tmp_dir = tempfile.mkdtemp() + error_file_path = os.path.join(tmp_dir, "error.dat") + report_file_path = os.path.join(tmp_dir, "report.dat") + try: + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path).run() + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path, overwrite=True).run() + finally: + # remove output files for clean up + shutil.rmtree(tmp_dir) + + def test_odr_model_default_meta(self): + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + p.set_meta(name='Sample Model Meta', ref='ODRPACK') + assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'}) + + def test_work_array_del_init(self): + """ + Verify fix for gh-18739 where del_init=1 fails. + """ + def func(b, x): + return b[0] + b[1] * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + linear_model = Model(func) + # Try various shapes of the `we` array from various `sy` and `covy` + rd0 = RealData(x, y, sx=x_err, sy=y_err) + rd1 = RealData(x, y, sx=x_err, sy=0.1) + rd2 = RealData(x, y, sx=x_err, sy=[0.1]) + rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1)) + rd4 = RealData(x, y, sx=x_err, covy=[[0.01]]) + rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01)) + for rd in [rd0, rd1, rd2, rd3, rd4, rd5]: + odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4], + delta0=np.full(n_data, -0.1)) + odr_obj.set_job(fit_type=0, del_init=1) + # Just make sure that it runs without raising an exception. 
+            odr_obj.run()
+
+    def test_pickling_data(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 * x + 2.0
+        data = Data(x, y)
+
+        obj_pickle = pickle.dumps(data)
+        del data
+        pickle.loads(obj_pickle)
+
+    def test_pickling_real_data(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 * x + 2.0
+        data = RealData(x, y)
+
+        obj_pickle = pickle.dumps(data)
+        del data
+        pickle.loads(obj_pickle)
+
+    def test_pickling_model(self):
+        obj_pickle = pickle.dumps(unilinear)
+        pickle.loads(obj_pickle)
+
+    def test_pickling_odr(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 * x + 2.0
+        odr_obj = ODR(Data(x, y), unilinear)
+
+        obj_pickle = pickle.dumps(odr_obj)
+        del odr_obj
+        pickle.loads(obj_pickle)
+
+    def test_pickling_output(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 * x + 2.0
+        output = ODR(Data(x, y), unilinear).run()
+
+        obj_pickle = pickle.dumps(output)
+        del output
+        pickle.loads(obj_pickle)
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/__init__.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a86d6f9f69f8be2186e2eb65b3185d0e4cb0ea4f
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/__init__.py
@@ -0,0 +1,894 @@
+"""
+========================================
+Special functions (:mod:`scipy.special`)
+========================================
+
+.. currentmodule:: scipy.special
+
+Almost all of the functions below accept NumPy arrays as input
+arguments as well as single numbers. This means they follow
+broadcasting and automatic array-looping rules. Technically,
+they are `NumPy universal functions
+`_.
+Functions which do not accept NumPy arrays are marked by a warning
+in the section description.
+
+.. seealso::
+
+   `scipy.special.cython_special` -- Typed Cython versions of special functions
+
+
+Error handling
+==============
+
+Errors are handled by returning NaNs or other appropriate values.
+Some of the special function routines can emit warnings or raise
+exceptions when an error occurs. By default this is disabled; to
+query and control the current error handling state the following
+functions are provided.
+
+.. autosummary::
+   :toctree: generated/
+
+   geterr -- Get the current way of handling special-function errors.
+   seterr -- Set how special-function errors are handled.
+   errstate -- Context manager for special-function error handling.
+   SpecialFunctionWarning -- Warning that can be emitted by special functions.
+   SpecialFunctionError -- Exception that can be raised by special functions.
+
+Available functions
+===================
+
+Airy functions
+--------------
+
+.. autosummary::
+   :toctree: generated/
+
+   airy -- Airy functions and their derivatives.
+   airye -- Exponentially scaled Airy functions and their derivatives.
+   ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative.
+   bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative.
+   itairy -- Integrals of Airy functions
+
+
+Elliptic functions and integrals
+--------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ellipj -- Jacobian elliptic functions.
+   ellipk -- Complete elliptic integral of the first kind.
+   ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1.
+   ellipkinc -- Incomplete elliptic integral of the first kind.
+   ellipe -- Complete elliptic integral of the second kind.
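The error-handling utilities listed near the top of this docstring (`geterr`, `seterr`, `errstate`) can turn the default silent NaN/inf returns into exceptions. A short sketch:

```python
import scipy.special as sc

print(sc.gammaln(0))                  # inf, returned silently by default

with sc.errstate(singular='raise'):   # escalate singularities locally
    try:
        sc.gammaln(0)
    except sc.SpecialFunctionError as exc:
        print('raised:', exc)
```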
+ ellipeinc -- Incomplete elliptic integral of the second kind. + elliprc -- Degenerate symmetric integral RC. + elliprd -- Symmetric elliptic integral of the second kind. + elliprf -- Completely-symmetric elliptic integral of the first kind. + elliprg -- Completely-symmetric elliptic integral of the second kind. + elliprj -- Symmetric elliptic integral of the third kind. + +Bessel functions +---------------- + +.. autosummary:: + :toctree: generated/ + + jv -- Bessel function of the first kind of real order and \ + complex argument. + jve -- Exponentially scaled Bessel function of order `v`. + yn -- Bessel function of the second kind of integer order and \ + real argument. + yv -- Bessel function of the second kind of real order and \ + complex argument. + yve -- Exponentially scaled Bessel function of the second kind \ + of real order. + kn -- Modified Bessel function of the second kind of integer \ + order `n` + kv -- Modified Bessel function of the second kind of real order \ + `v` + kve -- Exponentially scaled modified Bessel function of the \ + second kind. + iv -- Modified Bessel function of the first kind of real order. + ive -- Exponentially scaled modified Bessel function of the \ + first kind. + hankel1 -- Hankel function of the first kind. + hankel1e -- Exponentially scaled Hankel function of the first kind. + hankel2 -- Hankel function of the second kind. + hankel2e -- Exponentially scaled Hankel function of the second kind. + wright_bessel -- Wright's generalized Bessel function. + log_wright_bessel -- Logarithm of Wright's generalized Bessel function. + +The following function does not accept NumPy arrays (it is not a +universal function): + +.. autosummary:: + :toctree: generated/ + + lmbda -- Jahnke-Emden Lambda function, Lambdav(x). + +Zeros of Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'. + jnyn_zeros -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). + jn_zeros -- Compute zeros of integer-order Bessel function Jn(x). + jnp_zeros -- Compute zeros of integer-order Bessel function derivative Jn'(x). + yn_zeros -- Compute zeros of integer-order Bessel function Yn(x). + ynp_zeros -- Compute zeros of integer-order Bessel function derivative Yn'(x). + y0_zeros -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero. + y1_zeros -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero. + y1p_zeros -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero. + +Faster versions of common Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + j0 -- Bessel function of the first kind of order 0. + j1 -- Bessel function of the first kind of order 1. + y0 -- Bessel function of the second kind of order 0. + y1 -- Bessel function of the second kind of order 1. + i0 -- Modified Bessel function of order 0. + i0e -- Exponentially scaled modified Bessel function of order 0. + i1 -- Modified Bessel function of order 1. + i1e -- Exponentially scaled modified Bessel function of order 1. + k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`. + k0e -- Exponentially scaled modified Bessel function K of order 0 + k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. 
+ k1e -- Exponentially scaled modified Bessel function K of order 1. + +Integrals of Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + itj0y0 -- Integrals of Bessel functions of order 0. + it2j0y0 -- Integrals related to Bessel functions of order 0. + iti0k0 -- Integrals of modified Bessel functions of order 0. + it2i0k0 -- Integrals related to modified Bessel functions of order 0. + besselpoly -- Weighted integral of a Bessel function. + +Derivatives of Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`. + yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`. + kvp -- Compute nth derivative of real-order modified Bessel function Kv(z) + ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`. + h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`. + h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`. + +Spherical Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + spherical_jn -- Spherical Bessel function of the first kind or its derivative. + spherical_yn -- Spherical Bessel function of the second kind or its derivative. + spherical_in -- Modified spherical Bessel function of the first kind or its derivative. + spherical_kn -- Modified spherical Bessel function of the second kind or its derivative. + +Riccati-Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^ + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + riccati_jn -- Compute Ricatti-Bessel function of the first kind and its derivative. + riccati_yn -- Compute Ricatti-Bessel function of the second kind and its derivative. + +Struve functions +---------------- + +.. autosummary:: + :toctree: generated/ + + struve -- Struve function. + modstruve -- Modified Struve function. + itstruve0 -- Integral of the Struve function of order 0. + it2struve0 -- Integral related to the Struve function of order 0. + itmodstruve0 -- Integral of the modified Struve function of order 0. + + +Raw statistical functions +------------------------- + +.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions. + +Binomial distribution +^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + bdtr -- Binomial distribution cumulative distribution function. + bdtrc -- Binomial distribution survival function. + bdtri -- Inverse function to `bdtr` with respect to `p`. + bdtrik -- Inverse function to `bdtr` with respect to `k`. + bdtrin -- Inverse function to `bdtr` with respect to `n`. + +Beta distribution +^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + btdtr -- Cumulative distribution function of the beta distribution. + btdtri -- The `p`-th quantile of the beta distribution. + btdtria -- Inverse of `btdtr` with respect to `a`. + btdtrib -- btdtria(a, p, x). + +F distribution +^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + fdtr -- F cumulative distribution function. + fdtrc -- F survival function. + fdtri -- The `p`-th quantile of the F-distribution. + fdtridfd -- Inverse to `fdtr` vs dfd. + +Gamma distribution +^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + gdtr -- Gamma distribution cumulative distribution function. + gdtrc -- Gamma distribution survival function. + gdtria -- Inverse of `gdtr` vs a. 
+ gdtrib -- Inverse of `gdtr` vs b. + gdtrix -- Inverse of `gdtr` vs x. + +Negative binomial distribution +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + nbdtr -- Negative binomial cumulative distribution function. + nbdtrc -- Negative binomial survival function. + nbdtri -- Inverse of `nbdtr` vs `p`. + nbdtrik -- Inverse of `nbdtr` vs `k`. + nbdtrin -- Inverse of `nbdtr` vs `n`. + +Noncentral F distribution +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + ncfdtr -- Cumulative distribution function of the non-central F distribution. + ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution. + ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution. + ncfdtri -- Inverse cumulative distribution function of the non-central F distribution. + ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution. + +Noncentral t distribution +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + nctdtr -- Cumulative distribution function of the non-central `t` distribution. + nctdtridf -- Calculate degrees of freedom for non-central t distribution. + nctdtrit -- Inverse cumulative distribution function of the non-central t distribution. + nctdtrinc -- Calculate non-centrality parameter for non-central t distribution. + +Normal distribution +^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + nrdtrimn -- Calculate mean of normal distribution given other params. + nrdtrisd -- Calculate standard deviation of normal distribution given other params. + ndtr -- Normal cumulative distribution function. + log_ndtr -- Logarithm of normal cumulative distribution function. + ndtri -- Inverse of `ndtr` vs x. + ndtri_exp -- Inverse of `log_ndtr` vs x. + +Poisson distribution +^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + pdtr -- Poisson cumulative distribution function. + pdtrc -- Poisson survival function. + pdtri -- Inverse to `pdtr` vs m. + pdtrik -- Inverse to `pdtr` vs k. + +Student t distribution +^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + stdtr -- Student t distribution cumulative distribution function. + stdtridf -- Inverse of `stdtr` vs df. + stdtrit -- Inverse of `stdtr` vs `t`. + +Chi square distribution +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + chdtr -- Chi square cumulative distribution function. + chdtrc -- Chi square survival function. + chdtri -- Inverse to `chdtrc`. + chdtriv -- Inverse to `chdtr` vs `v`. + +Non-central chi square distribution +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + chndtr -- Non-central chi square cumulative distribution function. + chndtridf -- Inverse to `chndtr` vs `df`. + chndtrinc -- Inverse to `chndtr` vs `nc`. + chndtrix -- Inverse to `chndtr` vs `x`. + +Kolmogorov distribution +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function. + smirnovi -- Inverse to `smirnov`. + kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution. + kolmogi -- Inverse function to `kolmogorov`. + +Box-Cox transformation +^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + boxcox -- Compute the Box-Cox transformation. + boxcox1p -- Compute the Box-Cox transformation of 1 + `x`. + inv_boxcox -- Compute the inverse of the Box-Cox transformation. 
+ inv_boxcox1p -- Compute the inverse of the Box-Cox transformation. + + +Sigmoidal functions +^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + logit -- Logit ufunc for ndarrays. + expit -- Logistic sigmoid function. + log_expit -- Logarithm of the logistic sigmoid function. + +Miscellaneous +^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + tklmbda -- Tukey-Lambda cumulative distribution function. + owens_t -- Owen's T Function. + + +Information Theory functions +---------------------------- + +.. autosummary:: + :toctree: generated/ + + entr -- Elementwise function for computing entropy. + rel_entr -- Elementwise function for computing relative entropy. + kl_div -- Elementwise function for computing Kullback-Leibler divergence. + huber -- Huber loss function. + pseudo_huber -- Pseudo-Huber loss function. + + +Gamma and related functions +--------------------------- + +.. autosummary:: + :toctree: generated/ + + gamma -- Gamma function. + gammaln -- Logarithm of the absolute value of the Gamma function for real inputs. + loggamma -- Principal branch of the logarithm of the Gamma function. + gammasgn -- Sign of the gamma function. + gammainc -- Regularized lower incomplete gamma function. + gammaincinv -- Inverse to `gammainc`. + gammaincc -- Regularized upper incomplete gamma function. + gammainccinv -- Inverse to `gammaincc`. + beta -- Beta function. + betaln -- Natural logarithm of absolute value of beta function. + betainc -- Incomplete beta integral. + betaincc -- Complemented incomplete beta integral. + betaincinv -- Inverse function to beta integral. + betainccinv -- Inverse of the complemented incomplete beta integral. + psi -- The digamma function. + rgamma -- Gamma function inverted. + polygamma -- Polygamma function n. + multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma. + digamma -- psi(x[, out]). + poch -- Rising factorial (z)_m. + + +Error function and Fresnel integrals +------------------------------------ + +.. autosummary:: + :toctree: generated/ + + erf -- Returns the error function of complex argument. + erfc -- Complementary error function, ``1 - erf(x)``. + erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``. + erfi -- Imaginary error function, ``-i erf(i z)``. + erfinv -- Inverse function for erf. + erfcinv -- Inverse function for erfc. + wofz -- Faddeeva function. + dawsn -- Dawson's integral. + fresnel -- Fresnel sin and cos integrals. + fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). + modfresnelp -- Modified Fresnel positive integrals. + modfresnelm -- Modified Fresnel negative integrals. + voigt_profile -- Voigt profile. + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + erf_zeros -- Compute nt complex zeros of error function erf(z). + fresnelc_zeros -- Compute nt complex zeros of cosine Fresnel integral C(z). + fresnels_zeros -- Compute nt complex zeros of sine Fresnel integral S(z). + +Legendre functions +------------------ + +.. autosummary:: + :toctree: generated/ + + lpmv -- Associated Legendre function of integer order and real degree. + sph_harm -- Compute spherical harmonics. + +.. autosummary:: + :toctree: generated/ + + clpmn -- Associated Legendre function of the first kind for complex arguments. + lpn -- Legendre function of the first kind. + lqn -- Legendre function of the second kind. 
+   lpmn -- Sequence of associated Legendre functions of the first kind.
+   lqmn -- Sequence of associated Legendre functions of the second kind.
+
+Ellipsoidal harmonics
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ellip_harm -- Ellipsoidal harmonic functions E^p_n(l).
+   ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l).
+   ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n.
+
+Orthogonal polynomials
+----------------------
+
+The following functions evaluate values of orthogonal polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
+   eval_legendre -- Evaluate Legendre polynomial at a point.
+   eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
+   eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
+   eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
+   eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
+   eval_jacobi -- Evaluate Jacobi polynomial at a point.
+   eval_laguerre -- Evaluate Laguerre polynomial at a point.
+   eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
+   eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
+   eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
+   eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
+   eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
+   eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
+   eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
+   eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
+
+The following functions compute roots and quadrature weights for
+orthogonal polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   roots_legendre -- Gauss-Legendre quadrature.
+   roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
+   roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
+   roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
+   roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
+   roots_jacobi -- Gauss-Jacobi quadrature.
+   roots_laguerre -- Gauss-Laguerre quadrature.
+   roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
+   roots_hermite -- Gauss-Hermite (physicist's) quadrature.
+   roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
+   roots_gegenbauer -- Gauss-Gegenbauer quadrature.
+   roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
+   roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
+   roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
+   roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
+
+The functions below, in turn, return the polynomial coefficients in
+``orthopoly1d`` objects, which behave similarly to `numpy.poly1d`.
+The ``orthopoly1d`` class also has an attribute ``weights``, which returns
+the roots, weights, and total weights for the appropriate form of Gaussian
+quadrature. These are returned in an ``n x 3`` array with roots in the first
+column, weights in the second column, and total weights in the final column.
+Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
+arithmetic, and lose the information of the original orthogonal polynomial.
+
+.. autosummary::
+   :toctree: generated/
+
+   legendre -- Legendre polynomial.
+   chebyt -- Chebyshev polynomial of the first kind.
+   chebyu -- Chebyshev polynomial of the second kind.
+   chebyc -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
+   chebys -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+   jacobi -- Jacobi polynomial.
+   laguerre -- Laguerre polynomial.
+   genlaguerre -- Generalized (associated) Laguerre polynomial.
+   hermite -- Physicist's Hermite polynomial.
+   hermitenorm -- Normalized (probabilist's) Hermite polynomial.
+   gegenbauer -- Gegenbauer (ultraspherical) polynomial.
+   sh_legendre -- Shifted Legendre polynomial.
+   sh_chebyt -- Shifted Chebyshev polynomial of the first kind.
+   sh_chebyu -- Shifted Chebyshev polynomial of the second kind.
+   sh_jacobi -- Shifted Jacobi polynomial.
+
+.. warning::
+
+   Computing values of high-order polynomials (around ``order > 20``) using
+   polynomial coefficients is numerically unstable. To evaluate polynomial
+   values, the ``eval_*`` functions should be used instead.
+
+
+Hypergeometric functions
+------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
+   hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x).
+   hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind.
+   hyp0f1 -- Confluent hypergeometric limit function 0F1.
+
+
+Parabolic cylinder functions
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   pbdv -- Parabolic cylinder function D.
+   pbvv -- Parabolic cylinder function V.
+   pbwa -- Parabolic cylinder function W.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives.
+   pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives.
+   pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives.
+
+Mathieu and related functions
+-----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_a -- Characteristic value of even Mathieu functions.
+   mathieu_b -- Characteristic value of odd Mathieu functions.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions.
+   mathieu_odd_coef -- Fourier coefficients for odd Mathieu and modified Mathieu functions.
+
+The following return both the function and its first derivative:
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_cem -- Even Mathieu function and its derivative.
+   mathieu_sem -- Odd Mathieu function and its derivative.
+   mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative.
+   mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative.
+   mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative.
+   mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative.
+
+Spheroidal wave functions
+-------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative.
+   pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative.
+   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative.
+   obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative.
+   obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative.
+   obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
+   pro_cv -- Characteristic value of prolate spheroidal function.
+   obl_cv -- Characteristic value of oblate spheroidal function.
+   pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
+   obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
+
+The following functions require a pre-computed characteristic value:
+
+.. autosummary::
+   :toctree: generated/
+
+   pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value.
+   pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value.
+   pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value.
+   obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value.
+   obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value.
+   obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value.
+
+Kelvin functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   kelvin -- Kelvin functions as complex numbers.
+   kelvin_zeros -- Compute nt zeros of all Kelvin functions.
+   ber -- Kelvin function ber.
+   bei -- Kelvin function bei.
+   berp -- Derivative of the Kelvin function `ber`.
+   beip -- Derivative of the Kelvin function `bei`.
+   ker -- Kelvin function ker.
+   kei -- Kelvin function kei.
+   kerp -- Derivative of the Kelvin function `ker`.
+   keip -- Derivative of the Kelvin function `kei`.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   ber_zeros -- Compute nt zeros of the Kelvin function ber(x).
+   bei_zeros -- Compute nt zeros of the Kelvin function bei(x).
+   berp_zeros -- Compute nt zeros of the Kelvin function ber'(x).
+   beip_zeros -- Compute nt zeros of the Kelvin function bei'(x).
+   ker_zeros -- Compute nt zeros of the Kelvin function ker(x).
+   kei_zeros -- Compute nt zeros of the Kelvin function kei(x).
+   kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x).
+   keip_zeros -- Compute nt zeros of the Kelvin function kei'(x).
+
+Combinatorics
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   comb -- The number of combinations of N things taken k at a time.
+   perm -- Permutations of N things taken k at a time, i.e., k-permutations of N.
+   stirling2 -- Stirling numbers of the second kind.
+
+Lambert W and related functions
+-------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   lambertw -- Lambert W function.
+   wrightomega -- Wright Omega function.
+
+Other special functions
+-----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   agm -- Arithmetic-geometric mean.
+   bernoulli -- Bernoulli numbers B0..Bn (inclusive).
+   binom -- Binomial coefficient.
+   diric -- Periodic sinc function, also called the Dirichlet function.
+   euler -- Euler numbers E0..En (inclusive).
+   expn -- Exponential integral E_n.
+   exp1 -- Exponential integral E_1 of complex argument z.
+   expi -- Exponential integral Ei.
+   factorial -- The factorial of a number or array of numbers.
+   factorial2 -- Double factorial.
+   factorialk -- Multifactorial of n of order k, n(!!...!).
+   shichi -- Hyperbolic sine and cosine integrals.
+   sici -- Sine and cosine integrals.
+   softmax -- Softmax function.
+ log_softmax -- Logarithm of softmax function. + spence -- Spence's function, also known as the dilogarithm. + zeta -- Riemann zeta function. + zetac -- Riemann zeta function minus 1. + +Convenience functions +--------------------- + +.. autosummary:: + :toctree: generated/ + + cbrt -- Cube root of `x`. + exp10 -- 10**x. + exp2 -- 2**x. + radian -- Convert from degrees to radians. + cosdg -- Cosine of the angle `x` given in degrees. + sindg -- Sine of angle given in degrees. + tandg -- Tangent of angle x given in degrees. + cotdg -- Cotangent of the angle `x` given in degrees. + log1p -- Calculates log(1+x) for use when `x` is near zero. + expm1 -- ``exp(x) - 1`` for use when `x` is near zero. + cosm1 -- ``cos(x) - 1`` for use when `x` is near zero. + powm1 -- ``x**y - 1`` for use when `y` is near zero or `x` is near 1. + round -- Round to nearest integer. + xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. + xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. + logsumexp -- Compute the log of the sum of exponentials of input elements. + exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero. + sinc -- Return the sinc function. + +""" # noqa: E501 + +import os +import warnings + + +def _load_libsf_error_state(): + """Load libsf_error_state.dll shared library on Windows + + libsf_error_state manages shared state used by + ``scipy.special.seterr`` and ``scipy.special.geterr`` so that these + can work consistently between special functions provided by different + extension modules. This shared library is installed in scipy/special + alongside this __init__.py file. Due to lack of rpath support, Windows + cannot find shared libraries installed within wheels. To circumvent this, + we pre-load ``lib_sf_error_state.dll`` when on Windows. + + The logic for this function was borrowed from the function ``make_init`` + in `scipy/tools/openblas_support.py`: + https://github.com/scipy/scipy/blob/bb92c8014e21052e7dde67a76b28214dd1dcb94a/tools/openblas_support.py#L239-L274 + """ # noqa: E501 + if os.name == "nt": + try: + from ctypes import WinDLL + basedir = os.path.dirname(__file__) + except: # noqa: E722 + pass + else: + dll_path = os.path.join(basedir, "libsf_error_state.dll") + if os.path.exists(dll_path): + WinDLL(dll_path) + + +_load_libsf_error_state() + + +from ._sf_error import SpecialFunctionWarning, SpecialFunctionError + +from . import _ufuncs +from ._ufuncs import * + +# Replace some function definitions from _ufuncs to add Array API support +from ._support_alternative_backends import ( + log_ndtr, ndtr, ndtri, erf, erfc, i0, i0e, i1, i1e, gammaln, + gammainc, gammaincc, logit, expit, entr, rel_entr, xlogy, chdtrc) + +from . import _basic +from ._basic import * + +from ._logsumexp import logsumexp, softmax, log_softmax + +from . import _orthogonal +from ._orthogonal import * + +from ._spfun_stats import multigammaln +from ._ellip_harm import ( + ellip_harm, + ellip_harm_2, + ellip_normal +) +from ._lambertw import lambertw +from ._spherical_bessel import ( + spherical_jn, + spherical_yn, + spherical_in, + spherical_kn +) + +# Deprecated namespaces, to be removed in v2.0.0 +from . import add_newdocs, basic, orthogonal, specfun, sf_error, spfun_stats + +# We replace some function definitions from _ufuncs with those from +# _support_alternative_backends above, but those are all listed in _ufuncs.__all__, +# so there is no need to consider _support_alternative_backends.__all__ here. 
+__all__ = _ufuncs.__all__ + _basic.__all__ + _orthogonal.__all__ +__all__ += [ + 'SpecialFunctionWarning', + 'SpecialFunctionError', + 'logsumexp', + 'softmax', + 'log_softmax', + 'multigammaln', + 'ellip_harm', + 'ellip_harm_2', + 'ellip_normal', + 'lambertw', + 'spherical_jn', + 'spherical_yn', + 'spherical_in', + 'spherical_kn', +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester + +_depr_msg = ('\nThis function was deprecated in SciPy 1.12.0, and will be ' + 'removed in SciPy 1.14.0. Use scipy.special.{} instead.') + + +def btdtr(*args, **kwargs): # type: ignore [no-redef] + warnings.warn(_depr_msg.format('betainc'), category=DeprecationWarning, + stacklevel=2) + return _ufuncs.btdtr(*args, **kwargs) + + +btdtr.__doc__ = _ufuncs.btdtr.__doc__ # type: ignore [misc] + + +def btdtri(*args, **kwargs): # type: ignore [no-redef] + warnings.warn(_depr_msg.format('betaincinv'), category=DeprecationWarning, + stacklevel=2) + return _ufuncs.btdtri(*args, **kwargs) + + +btdtri.__doc__ = _ufuncs.btdtri.__doc__ # type: ignore [misc] + + +def _get_include(): + """This function is for development purposes only. + + This function could disappear or its behavior could change at any time. + """ + import os + return os.path.dirname(__file__) + diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_add_newdocs.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..17b4457e98197e55ccc115f23020df4e930e21e6 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_add_newdocs.py @@ -0,0 +1,12847 @@ +# Docstrings for generated ufuncs +# +# The syntax is designed to look like the function add_newdoc is being +# called from numpy.lib, but in this file add_newdoc puts the +# docstrings in a dictionary. This dictionary is used in +# _generate_pyx.py to generate the docstrings for the ufuncs in +# scipy.special at the C level when the ufuncs are created at compile +# time. + +docdict: dict[str, str] = {} + + +def get(name): + return docdict.get(name) + + +def add_newdoc(name, doc): + docdict[name] = doc + + +add_newdoc("_sf_error_test_function", + """ + Private function; do not use. + """) + + +add_newdoc("_cosine_cdf", + """ + _cosine_cdf(x) + + Cumulative distribution function (CDF) of the cosine distribution:: + + { 0, x < -pi + cdf(x) = { (pi + x + sin(x))/(2*pi), -pi <= x <= pi + { 1, x > pi + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + + Returns + ------- + scalar or ndarray + The cosine distribution CDF evaluated at `x`. + + """) + +add_newdoc("_cosine_invcdf", + """ + _cosine_invcdf(p) + + Inverse of the cumulative distribution function (CDF) of the cosine + distribution. + + The CDF of the cosine distribution is:: + + cdf(x) = (pi + x + sin(x))/(2*pi) + + This function computes the inverse of cdf(x). + + Parameters + ---------- + p : array_like + `p` must contain real numbers in the interval ``0 <= p <= 1``. + `nan` is returned for values of `p` outside the interval [0, 1]. + + Returns + ------- + scalar or ndarray + The inverse of the cosine distribution CDF evaluated at `p`. + + """) + +add_newdoc("_ellip_harm", + """ + Internal function, use `ellip_harm` instead. + """) + +add_newdoc("_ellip_norm", + """ + Internal function, use `ellip_norm` instead. 
+ """) + +add_newdoc("voigt_profile", + r""" + voigt_profile(x, sigma, gamma, out=None) + + Voigt profile. + + The Voigt profile is a convolution of a 1-D Normal distribution with + standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at + half-maximum ``gamma``. + + If ``sigma = 0``, PDF of Cauchy distribution is returned. + Conversely, if ``gamma = 0``, PDF of Normal distribution is returned. + If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``, + and ``0`` for all other ``x``. + + Parameters + ---------- + x : array_like + Real argument + sigma : array_like + The standard deviation of the Normal distribution part + gamma : array_like + The half-width at half-maximum of the Cauchy distribution part + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The Voigt profile at the given arguments + + See Also + -------- + wofz : Faddeeva function + + Notes + ----- + It can be expressed in terms of Faddeeva function + + .. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}}, + .. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma} + + where :math:`w(z)` is the Faddeeva function. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Voigt_profile + + Examples + -------- + Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``. + + >>> from scipy.special import voigt_profile + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> voigt_profile(2, 1., 1.) + 0.09071519942627544 + + Calculate the function at several points by providing a NumPy array + for `x`. + + >>> values = np.array([-2., 0., 5]) + >>> voigt_profile(values, 1., 1.) + array([0.0907152 , 0.20870928, 0.01388492]) + + Plot the function for different parameter sets. + + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> x = np.linspace(-10, 10, 500) + >>> parameters_list = [(1.5, 0., "solid"), (1.3, 0.5, "dashed"), + ... (0., 1.8, "dotted"), (1., 1., "dashdot")] + >>> for params in parameters_list: + ... sigma, gamma, linestyle = params + ... voigt = voigt_profile(x, sigma, gamma) + ... ax.plot(x, voigt, label=rf"$\sigma={sigma},\, \gamma={gamma}$", + ... ls=linestyle) + >>> ax.legend() + >>> plt.show() + + Verify visually that the Voigt profile indeed arises as the convolution + of a normal and a Cauchy distribution. + + >>> from scipy.signal import convolve + >>> x, dx = np.linspace(-10, 10, 500, retstep=True) + >>> def gaussian(x, sigma): + ... return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi)) + >>> def cauchy(x, gamma): + ... return gamma/(np.pi * (np.square(x)+gamma**2)) + >>> sigma = 2 + >>> gamma = 1 + >>> gauss_profile = gaussian(x, sigma) + >>> cauchy_profile = cauchy(x, gamma) + >>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode="same") + >>> voigt = voigt_profile(x, sigma, gamma) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> ax.plot(x, gauss_profile, label="Gauss: $G$", c='b') + >>> ax.plot(x, cauchy_profile, label="Cauchy: $C$", c='y', ls="dashed") + >>> xx = 0.5*(x[1:] + x[:-1]) # midpoints + >>> ax.plot(xx, convolved[1:], label="Convolution: $G * C$", ls='dashdot', + ... c='k') + >>> ax.plot(x, voigt, label="Voigt", ls='dotted', c='r') + >>> ax.legend() + >>> plt.show() + """) + +add_newdoc("wrightomega", + r""" + wrightomega(z, out=None) + + Wright Omega function. + + Defined as the solution to + + .. math:: + + \omega + \log(\omega) = z + + where :math:`\log` is the principal branch of the complex logarithm. 
+ + Parameters + ---------- + z : array_like + Points at which to evaluate the Wright Omega function + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + omega : scalar or ndarray + Values of the Wright Omega function + + See Also + -------- + lambertw : The Lambert W function + + Notes + ----- + .. versionadded:: 0.19.0 + + The function can also be defined as + + .. math:: + + \omega(z) = W_{K(z)}(e^z) + + where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the + unwinding number and :math:`W` is the Lambert W function. + + The implementation here is taken from [1]_. + + References + ---------- + .. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex + Double-Precision Evaluation of the Wright :math:`\omega` + Function." ACM Transactions on Mathematical Software, + 2012. :doi:`10.1145/2168773.2168779`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import wrightomega, lambertw + + >>> wrightomega([-2, -1, 0, 1, 2]) + array([0.12002824, 0.27846454, 0.56714329, 1. , 1.5571456 ]) + + Complex input: + + >>> wrightomega(3 + 5j) + (1.5804428632097158+3.8213626783287937j) + + Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``: + + >>> w = -5 + 4j + >>> wrightomega(w + np.log(w)) + (-5+4j) + + Verify the connection to ``lambertw``: + + >>> z = 0.5 + 3j + >>> wrightomega(z) + (0.0966015889280649+1.4937828458191993j) + >>> lambertw(np.exp(z)) + (0.09660158892806493+1.4937828458191993j) + + >>> z = 0.5 + 4j + >>> wrightomega(z) + (-0.3362123489037213+2.282986001579032j) + >>> lambertw(np.exp(z), k=1) + (-0.33621234890372115+2.282986001579032j) + """) + + +add_newdoc("agm", + """ + agm(a, b, out=None) + + Compute the arithmetic-geometric mean of `a` and `b`. + + Start with a_0 = a and b_0 = b and iteratively compute:: + + a_{n+1} = (a_n + b_n)/2 + b_{n+1} = sqrt(a_n*b_n) + + a_n and b_n converge to the same limit as n increases; their common + limit is agm(a, b). + + Parameters + ---------- + a, b : array_like + Real values only. If the values are both negative, the result + is negative. If one value is negative and the other is positive, + `nan` is returned. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The arithmetic-geometric mean of `a` and `b`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import agm + >>> a, b = 24.0, 6.0 + >>> agm(a, b) + 13.458171481725614 + + Compare that result to the iteration: + + >>> while a != b: + ... a, b = (a + b)/2, np.sqrt(a*b) + ... print("a = %19.16f b=%19.16f" % (a, b)) + ... + a = 15.0000000000000000 b=12.0000000000000000 + a = 13.5000000000000000 b=13.4164078649987388 + a = 13.4582039324993694 b=13.4581390309909850 + a = 13.4581714817451772 b=13.4581714817060547 + a = 13.4581714817256159 b=13.4581714817256159 + + When array-like arguments are given, broadcasting applies: + + >>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1). + >>> b = np.array([6, 12, 24, 48]) # b has shape (4,). + >>> agm(a, b) + array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756], + [ 4.37037309, 6.72908574, 10.84726853, 18.11597502], + [ 6. , 8.74074619, 13.45817148, 21.69453707]]) + """) + +add_newdoc("airy", + r""" + airy(z, out=None) + + Airy functions and their derivatives. + + Parameters + ---------- + z : array_like + Real or complex argument. 
+ out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + Ai, Aip, Bi, Bip : 4-tuple of scalar or ndarray + Airy functions Ai and Bi, and their derivatives Aip and Bip. + + See Also + -------- + airye : exponentially scaled Airy functions. + + Notes + ----- + The Airy functions Ai and Bi are two independent solutions of + + .. math:: y''(x) = x y(x). + + For real `z` in [-10, 10], the computation is carried out by calling + the Cephes [1]_ `airy` routine, which uses power series summation + for small `z` and rational minimax approximations for large `z`. + + Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are + employed. They are computed using power series for :math:`|z| < 1` and + the following relations to modified Bessel functions for larger `z` + (where :math:`t \equiv 2 z^{3/2}/3`): + + .. math:: + + Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t) + + Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t) + + Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right) + + Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right) + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Compute the Airy functions on the interval [-15, 5]. + + >>> import numpy as np + >>> from scipy import special + >>> x = np.linspace(-15, 5, 201) + >>> ai, aip, bi, bip = special.airy(x) + + Plot Ai(x) and Bi(x). + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, ai, 'r', label='Ai(x)') + >>> plt.plot(x, bi, 'b--', label='Bi(x)') + >>> plt.ylim(-0.5, 1.0) + >>> plt.grid() + >>> plt.legend(loc='upper left') + >>> plt.show() + + """) + +add_newdoc("airye", + """ + airye(z, out=None) + + Exponentially scaled Airy functions and their derivatives. + + Scaling:: + + eAi = Ai * exp(2.0/3.0*z*sqrt(z)) + eAip = Aip * exp(2.0/3.0*z*sqrt(z)) + eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) + eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) + + Parameters + ---------- + z : array_like + Real or complex argument. + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + eAi, eAip, eBi, eBip : 4-tuple of scalar or ndarray + Exponentially scaled Airy functions eAi and eBi, and their derivatives + eAip and eBip + + See Also + -------- + airy + + Notes + ----- + Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + We can compute exponentially scaled Airy functions and their derivatives: + + >>> import numpy as np + >>> from scipy.special import airye + >>> import matplotlib.pyplot as plt + >>> z = np.linspace(0, 50, 500) + >>> eAi, eAip, eBi, eBip = airye(z) + >>> f, ax = plt.subplots(2, 1, sharex=True) + >>> for ind, data in enumerate([[eAi, eAip, ["eAi", "eAip"]], + ... [eBi, eBip, ["eBi", "eBip"]]]): + ... ax[ind].plot(z, data[0], "-r", z, data[1], "-b") + ... ax[ind].legend(data[2]) + ... 
ax[ind].grid(True) + >>> plt.show() + + We can compute these using usual non-scaled Airy functions by: + + >>> from scipy.special import airy + >>> Ai, Aip, Bi, Bip = airy(z) + >>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z))) + True + >>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z))) + True + >>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z))))) + True + >>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z))))) + True + + Comparing non-scaled and exponentially scaled ones, the usual non-scaled + function quickly underflows for large values, whereas the exponentially + scaled function does not. + + >>> airy(200) + (0.0, 0.0, nan, nan) + >>> airye(200) + (0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093) + + """) + +add_newdoc("bdtr", + r""" + bdtr(k, n, p, out=None) + + Binomial distribution cumulative distribution function. + + Sum of the terms 0 through `floor(k)` of the Binomial probability density. + + .. math:: + \mathrm{bdtr}(k, n, p) = + \sum_{j=0}^{\lfloor k \rfloor} {{n}\choose{j}} p^j (1-p)^{n-j} + + Parameters + ---------- + k : array_like + Number of successes (double), rounded down to the nearest integer. + n : array_like + Number of events (int). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Probability of `floor(k)` or fewer successes in `n` independent events with + success probabilities of `p`. + + Notes + ----- + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{bdtr}(k, n, p) = + I_{1 - p}(n - \lfloor k \rfloor, \lfloor k \rfloor + 1). + + Wrapper for the Cephes [1]_ routine `bdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("bdtrc", + r""" + bdtrc(k, n, p, out=None) + + Binomial distribution survival function. + + Sum of the terms `floor(k) + 1` through `n` of the binomial probability + density, + + .. math:: + \mathrm{bdtrc}(k, n, p) = + \sum_{j=\lfloor k \rfloor +1}^n {{n}\choose{j}} p^j (1-p)^{n-j} + + Parameters + ---------- + k : array_like + Number of successes (double), rounded down to nearest integer. + n : array_like + Number of events (int) + p : array_like + Probability of success in a single event. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Probability of `floor(k) + 1` or more successes in `n` independent + events with success probabilities of `p`. + + See Also + -------- + bdtr + betainc + + Notes + ----- + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{bdtrc}(k, n, p) = I_{p}(\lfloor k \rfloor + 1, n - \lfloor k \rfloor). + + Wrapper for the Cephes [1]_ routine `bdtrc`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("bdtri", + r""" + bdtri(k, n, y, out=None) + + Inverse function to `bdtr` with respect to `p`. + + Finds the event probability `p` such that the sum of the terms 0 through + `k` of the binomial probability density is equal to the given cumulative + probability `y`. 
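+
+    As a small worked example (the numbers follow directly from the binomial
+    sum): for ``n = 2`` and ``p = 0.5``, the probability of at most ``k = 1``
+    successes is :math:`\binom{2}{0}(0.5)^2 + \binom{2}{1}(0.5)^2 = 0.75`,
+    so ``bdtri(1, 2, 0.75)`` recovers ``p = 0.5``.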
+ + Parameters + ---------- + k : array_like + Number of successes (float), rounded down to the nearest integer. + n : array_like + Number of events (float) + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + p : scalar or ndarray + The event probability such that `bdtr(\lfloor k \rfloor, n, p) = y`. + + See Also + -------- + bdtr + betaincinv + + Notes + ----- + The computation is carried out using the inverse beta integral function + and the relation,:: + + 1 - p = betaincinv(n - k, k + 1, y). + + Wrapper for the Cephes [1]_ routine `bdtri`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("bdtrik", + """ + bdtrik(y, n, p, out=None) + + Inverse function to `bdtr` with respect to `k`. + + Finds the number of successes `k` such that the sum of the terms 0 through + `k` of the Binomial probability density for `n` events with probability + `p` is equal to the given cumulative probability `y`. + + Parameters + ---------- + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + n : array_like + Number of events (float). + p : array_like + Success probability (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + k : scalar or ndarray + The number of successes `k` such that `bdtr(k, n, p) = y`. + + See Also + -------- + bdtr + + Notes + ----- + Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the + cumulative incomplete beta distribution. + + Computation of `k` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `k`. + + Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [2] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + + """) + +add_newdoc("bdtrin", + """ + bdtrin(k, y, p, out=None) + + Inverse function to `bdtr` with respect to `n`. + + Finds the number of events `n` such that the sum of the terms 0 through + `k` of the Binomial probability density for events with probability `p` is + equal to the given cumulative probability `y`. + + Parameters + ---------- + k : array_like + Number of successes (float). + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + p : array_like + Success probability (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + n : scalar or ndarray + The number of events `n` such that `bdtr(k, n, p) = y`. + + See Also + -------- + bdtr + + Notes + ----- + Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the + cumulative incomplete beta distribution. + + Computation of `n` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `n`. + + Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
+    .. [2] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    """)
+
+add_newdoc("btdtria",
+    r"""
+    btdtria(p, b, x, out=None)
+
+    Inverse of `btdtr` with respect to `a`.
+
+    This is the inverse of the beta cumulative distribution function, `btdtr`,
+    considered as a function of `a`, returning the value of `a` for which
+    `btdtr(a, b, x) = p`, or
+
+    .. math::
+        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    Parameters
+    ----------
+    p : array_like
+        Cumulative probability, in [0, 1].
+    b : array_like
+        Shape parameter (`b` > 0).
+    x : array_like
+        The quantile, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    a : scalar or ndarray
+        The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
+
+    See Also
+    --------
+    btdtr : Cumulative distribution function of the beta distribution.
+    btdtri : Inverse with respect to `x`.
+    btdtrib : Inverse with respect to `b`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `a`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Algorithm 708: Significant Digit Computation of the Incomplete Beta
+           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
+
+    """)
+
+add_newdoc("btdtrib",
+    r"""
+    btdtrib(a, p, x, out=None)
+
+    Inverse of `btdtr` with respect to `b`.
+
+    This is the inverse of the beta cumulative distribution function, `btdtr`,
+    considered as a function of `b`, returning the value of `b` for which
+    `btdtr(a, b, x) = p`, or
+
+    .. math::
+        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    Parameters
+    ----------
+    a : array_like
+        Shape parameter (`a` > 0).
+    p : array_like
+        Cumulative probability, in [0, 1].
+    x : array_like
+        The quantile, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    b : scalar or ndarray
+        The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
+
+    See Also
+    --------
+    btdtr : Cumulative distribution function of the beta distribution.
+    btdtri : Inverse with respect to `x`.
+    btdtria : Inverse with respect to `a`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `b`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Algorithm 708: Significant Digit Computation of the Incomplete Beta
+           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
+
+
+    """)
+
+add_newdoc("besselpoly",
+    r"""
+    besselpoly(a, lmb, nu, out=None)
+
+    Weighted integral of the Bessel function of the first kind.
+ + Computes + + .. math:: + + \int_0^1 x^\lambda J_\nu(2 a x) \, dx + + where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`, + :math:`\nu=nu`. + + Parameters + ---------- + a : array_like + Scale factor inside the Bessel function. + lmb : array_like + Power of `x` + nu : array_like + Order of the Bessel function. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Value of the integral. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Evaluate the function for one parameter set. + + >>> from scipy.special import besselpoly + >>> besselpoly(1, 1, 1) + 0.24449718372863877 + + Evaluate the function for different scale factors. + + >>> import numpy as np + >>> factors = np.array([0., 3., 6.]) + >>> besselpoly(factors, 1, 1) + array([ 0. , -0.00549029, 0.00140174]) + + Plot the function for varying powers, orders and scales. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> powers = np.linspace(0, 10, 100) + >>> orders = [1, 2, 3] + >>> scales = [1, 2] + >>> all_combinations = [(order, scale) for order in orders + ... for scale in scales] + >>> for order, scale in all_combinations: + ... ax.plot(powers, besselpoly(scale, powers, order), + ... label=rf"$\nu={order}, a={scale}$") + >>> ax.legend() + >>> ax.set_xlabel(r"$\lambda$") + >>> ax.set_ylabel(r"$\int_0^1 x^{\lambda} J_{\nu}(2ax)\,dx$") + >>> plt.show() + """) + +add_newdoc("beta", + r""" + beta(a, b, out=None) + + Beta function. + + This function is defined in [1]_ as + + .. math:: + + B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt + = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a, b : array_like + Real-valued arguments + out : ndarray, optional + Optional output array for the function result + + Returns + ------- + scalar or ndarray + Value of the beta function + + See Also + -------- + gamma : the gamma function + betainc : the regularized incomplete beta function + betaln : the natural logarithm of the absolute + value of the beta function + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions, + Eq. 5.12.1. https://dlmf.nist.gov/5.12 + + Examples + -------- + >>> import scipy.special as sc + + The beta function relates to the gamma function by the + definition given above: + + >>> sc.beta(2, 3) + 0.08333333333333333 + >>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3) + 0.08333333333333333 + + As this relationship demonstrates, the beta function + is symmetric: + + >>> sc.beta(1.7, 2.4) + 0.16567527689031739 + >>> sc.beta(2.4, 1.7) + 0.16567527689031739 + + This function satisfies :math:`B(1, b) = 1/b`: + + >>> sc.beta(1, 4) + 0.25 + + """) + +add_newdoc( + "betainc", + r""" + betainc(a, b, x, out=None) + + Regularized incomplete beta function. + + Computes the regularized incomplete beta function, defined as [1]_: + + .. math:: + + I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x + t^{a-1}(1-t)^{b-1}dt, + + for :math:`0 \leq x \leq 1`. + + This function is the cumulative distribution function for the beta + distribution; its range is [0, 1]. 
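+
+    For instance, when :math:`a = 1` the integral has the closed form
+    :math:`I_x(1, b) = 1 - (1 - x)^b`, so ``betainc(1, 2, 0.5)`` evaluates
+    to :math:`1 - (0.5)^2 = 0.75`.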
+ + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + x : array_like + Real-valued such that :math:`0 \leq x \leq 1`, + the upper limit of integration + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Value of the regularized incomplete beta function + + See Also + -------- + beta : beta function + betaincinv : inverse of the regularized incomplete beta function + betaincc : complement of the regularized incomplete beta function + scipy.stats.beta : beta distribution + + Notes + ----- + The term *regularized* in the name of this function refers to the + scaling of the function by the gamma function terms shown in the + formula. When not qualified as *regularized*, the name *incomplete + beta function* often refers to just the integral expression, + without the gamma terms. One can use the function `beta` from + `scipy.special` to get this "nonregularized" incomplete beta + function by multiplying the result of ``betainc(a, b, x)`` by + ``beta(a, b)``. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + + Let :math:`B(a, b)` be the `beta` function. + + >>> import scipy.special as sc + + The coefficient in terms of `gamma` is equal to + :math:`1/B(a, b)`. Also, when :math:`x=1` + the integral is equal to :math:`B(a, b)`. + Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`. + + >>> sc.betainc(0.2, 3.5, 1.0) + 1.0 + + It satisfies + :math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`, + where :math:`F` is the hypergeometric function `hyp2f1`: + + >>> a, b, x = 1.4, 3.1, 0.5 + >>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b)) + 0.8148904036225295 + >>> sc.betainc(a, b, x) + 0.8148904036225296 + + This functions satisfies the relationship + :math:`I_x(a, b) = 1 - I_{1-x}(b, a)`: + + >>> sc.betainc(2.2, 3.1, 0.4) + 0.49339638807619446 + >>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4) + 0.49339638807619446 + + """) + + +add_newdoc( + "betaincc", + r""" + betaincc(a, b, x, out=None) + + Complement of the regularized incomplete beta function. + + Computes the complement of the regularized incomplete beta function, + defined as [1]_: + + .. math:: + + \bar{I}_x(a, b) = 1 - I_x(a, b) + = 1 - \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x + t^{a-1}(1-t)^{b-1}dt, + + for :math:`0 \leq x \leq 1`. + + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + x : array_like + Real-valued such that :math:`0 \leq x \leq 1`, + the upper limit of integration + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Value of the regularized incomplete beta function + + See Also + -------- + betainc : regularized incomplete beta function + betaincinv : inverse of the regularized incomplete beta function + betainccinv : + inverse of the complement of the regularized incomplete beta function + beta : beta function + scipy.stats.beta : beta distribution + + Notes + ----- + .. versionadded:: 1.11.0 + + References + ---------- + .. 
[1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + >>> from scipy.special import betaincc, betainc + + The naive calculation ``1 - betainc(a, b, x)`` loses precision when + the values of ``betainc(a, b, x)`` are close to 1: + + >>> 1 - betainc(0.5, 8, [0.9, 0.99, 0.999]) + array([2.0574632e-09, 0.0000000e+00, 0.0000000e+00]) + + By using ``betaincc``, we get the correct values: + + >>> betaincc(0.5, 8, [0.9, 0.99, 0.999]) + array([2.05746321e-09, 1.97259354e-17, 1.96467954e-25]) + + """) + +add_newdoc( + "betaincinv", + r""" + betaincinv(a, b, y, out=None) + + Inverse of the regularized incomplete beta function. + + Computes :math:`x` such that: + + .. math:: + + y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} + \int_0^x t^{a-1}(1-t)^{b-1}dt, + + where :math:`I_x` is the normalized incomplete beta function `betainc` + and :math:`\Gamma` is the `gamma` function [1]_. + + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + y : array_like + Real-valued input + out : ndarray, optional + Optional output array for function values + + Returns + ------- + scalar or ndarray + Value of the inverse of the regularized incomplete beta function + + See Also + -------- + betainc : regularized incomplete beta function + gamma : gamma function + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + >>> import scipy.special as sc + + This function is the inverse of `betainc` for fixed + values of :math:`a` and :math:`b`. + + >>> a, b = 1.2, 3.1 + >>> y = sc.betainc(a, b, 0.2) + >>> sc.betaincinv(a, b, y) + 0.2 + >>> + >>> a, b = 7.5, 0.4 + >>> x = sc.betaincinv(a, b, 0.5) + >>> sc.betainc(a, b, x) + 0.5 + + """) + + +add_newdoc( + "betainccinv", + r""" + betainccinv(a, b, y, out=None) + + Inverse of the complemented regularized incomplete beta function. + + Computes :math:`x` such that: + + .. math:: + + y = 1 - I_x(a, b) = 1 - \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} + \int_0^x t^{a-1}(1-t)^{b-1}dt, + + where :math:`I_x` is the normalized incomplete beta function `betainc` + and :math:`\Gamma` is the `gamma` function [1]_. + + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + y : array_like + Real-valued input + out : ndarray, optional + Optional output array for function values + + Returns + ------- + scalar or ndarray + Value of the inverse of the regularized incomplete beta function + + See Also + -------- + betainc : regularized incomplete beta function + betaincc : complement of the regularized incomplete beta function + + Notes + ----- + .. versionadded:: 1.11.0 + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + >>> from scipy.special import betainccinv, betaincc + + This function is the inverse of `betaincc` for fixed + values of :math:`a` and :math:`b`. + + >>> a, b = 1.2, 3.1 + >>> y = betaincc(a, b, 0.2) + >>> betainccinv(a, b, y) + 0.2 + + >>> a, b = 7, 2.5 + >>> x = betainccinv(a, b, 0.875) + >>> betaincc(a, b, x) + 0.875 + + """) + +add_newdoc("betaln", + """ + betaln(a, b, out=None) + + Natural logarithm of absolute value of beta function. + + Computes ``ln(abs(beta(a, b)))``. 
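+
+    Since :math:`B(1, 1) = 1`, a quick sanity check is that ``betaln(1, 1)``
+    returns ``0.0``; the Examples below compare `betaln` against
+    ``log(beta(a, b))`` for moderate arguments.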
+ + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + out : ndarray, optional + Optional output array for function values + + Returns + ------- + scalar or ndarray + Value of the betaln function + + See Also + -------- + gamma : the gamma function + betainc : the regularized incomplete beta function + beta : the beta function + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import betaln, beta + + Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)`` + is the same as ``log(beta(a, b))``: + + >>> betaln(3, 4) + -4.0943445622221 + + >>> np.log(beta(3, 4)) + -4.0943445622221 + + In the following ``beta(a, b)`` underflows to 0, so we can't compute + the logarithm of the actual value. + + >>> a = 400 + >>> b = 900 + >>> beta(a, b) + 0.0 + + We can compute the logarithm of ``beta(a, b)`` by using `betaln`: + + >>> betaln(a, b) + -804.3069951764146 + + """) + +add_newdoc("boxcox", + """ + boxcox(x, lmbda, out=None) + + Compute the Box-Cox transformation. + + The Box-Cox transformation is:: + + y = (x**lmbda - 1) / lmbda if lmbda != 0 + log(x) if lmbda == 0 + + Returns `nan` if ``x < 0``. + Returns `-inf` if ``x == 0`` and ``lmbda < 0``. + + Parameters + ---------- + x : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.special import boxcox + >>> boxcox([1, 4, 10], 2.5) + array([ 0. , 12.4 , 126.09110641]) + >>> boxcox(2, [0, 1, 2]) + array([ 0.69314718, 1. , 1.5 ]) + """) + +add_newdoc("boxcox1p", + """ + boxcox1p(x, lmbda, out=None) + + Compute the Box-Cox transformation of 1 + `x`. + + The Box-Cox transformation computed by `boxcox1p` is:: + + y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 + log(1+x) if lmbda == 0 + + Returns `nan` if ``x < -1``. + Returns `-inf` if ``x == -1`` and ``lmbda < 0``. + + Parameters + ---------- + x : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.special import boxcox1p + >>> boxcox1p(1e-4, [0, 0.5, 1]) + array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04]) + >>> boxcox1p([0.01, 0.1], 0.25) + array([ 0.00996272, 0.09645476]) + """) + +add_newdoc("inv_boxcox", + """ + inv_boxcox(y, lmbda, out=None) + + Compute the inverse of the Box-Cox transformation. + + Find ``x`` such that:: + + y = (x**lmbda - 1) / lmbda if lmbda != 0 + log(x) if lmbda == 0 + + Parameters + ---------- + y : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.special import boxcox, inv_boxcox + >>> y = boxcox([1, 4, 10], 2.5) + >>> inv_boxcox(y, 2.5) + array([1., 4., 10.]) + """) + +add_newdoc("inv_boxcox1p", + """ + inv_boxcox1p(y, lmbda, out=None) + + Compute the inverse of the Box-Cox transformation. 
+ + Find ``x`` such that:: + + y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 + log(1+x) if lmbda == 0 + + Parameters + ---------- + y : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.special import boxcox1p, inv_boxcox1p + >>> y = boxcox1p([1, 4, 10], 2.5) + >>> inv_boxcox1p(y, 2.5) + array([1., 4., 10.]) + """) + +add_newdoc("btdtr", + r""" + btdtr(a, b, x, out=None) + + Cumulative distribution function of the beta distribution. + + Returns the integral from zero to `x` of the beta probability density + function, + + .. math:: + I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + where :math:`\Gamma` is the gamma function. + + .. deprecated:: 1.12.0 + This function is deprecated and will be removed from SciPy 1.14.0. + Use `scipy.special.betainc` instead. + + Parameters + ---------- + a : array_like + Shape parameter (a > 0). + b : array_like + Shape parameter (b > 0). + x : array_like + Upper limit of integration, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Cumulative distribution function of the beta distribution with + parameters `a` and `b` at `x`. + + See Also + -------- + betainc + + Notes + ----- + This function is identical to the incomplete beta integral function + `betainc`. + + Wrapper for the Cephes [1]_ routine `btdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("btdtri", + r""" + btdtri(a, b, p, out=None) + + The `p`-th quantile of the beta distribution. + + This function is the inverse of the beta cumulative distribution function, + `btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or + + .. math:: + p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + .. deprecated:: 1.12.0 + This function is deprecated and will be removed from SciPy 1.14.0. + Use `scipy.special.betaincinv` instead. + + Parameters + ---------- + a : array_like + Shape parameter (`a` > 0). + b : array_like + Shape parameter (`b` > 0). + p : array_like + Cumulative probability, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + The quantile corresponding to `p`. + + See Also + -------- + betaincinv + btdtr + + Notes + ----- + The value of `x` is found by interval halving or Newton iterations. + + Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent + problem of finding the inverse of the incomplete beta integral. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("cbrt", + """ + cbrt(x, out=None) + + Element-wise cube root of `x`. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The cube root of each value in `x`. + + Examples + -------- + >>> from scipy.special import cbrt + + >>> cbrt(8) + 2.0 + >>> cbrt([-8, -3, 0.125, 1.331]) + array([-2. 
, -1.44224957, 0.5 , 1.1 ]) + + """) + +add_newdoc("chdtr", + r""" + chdtr(v, x, out=None) + + Chi square cumulative distribution function. + + Returns the area under the left tail (from 0 to `x`) of the Chi + square probability density function with `v` degrees of freedom: + + .. math:: + + \frac{1}{2^{v/2} \Gamma(v/2)} \int_0^x t^{v/2 - 1} e^{-t/2} dt + + Here :math:`\Gamma` is the Gamma function; see `gamma`. This + integral can be expressed in terms of the regularized lower + incomplete gamma function `gammainc` as + ``gammainc(v / 2, x / 2)``. [1]_ + + Parameters + ---------- + v : array_like + Degrees of freedom. + x : array_like + Upper bound of the integral. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the cumulative distribution function. + + See Also + -------- + chdtrc, chdtri, chdtriv, gammainc + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It can be expressed in terms of the regularized lower incomplete + gamma function. + + >>> v = 1 + >>> x = np.arange(4) + >>> sc.chdtr(v, x) + array([0. , 0.68268949, 0.84270079, 0.91673548]) + >>> sc.gammainc(v / 2, x / 2) + array([0. , 0.68268949, 0.84270079, 0.91673548]) + + """) + +add_newdoc("chdtrc", + r""" + chdtrc(v, x, out=None) + + Chi square survival function. + + Returns the area under the right hand tail (from `x` to infinity) + of the Chi square probability density function with `v` degrees of + freedom: + + .. math:: + + \frac{1}{2^{v/2} \Gamma(v/2)} \int_x^\infty t^{v/2 - 1} e^{-t/2} dt + + Here :math:`\Gamma` is the Gamma function; see `gamma`. This + integral can be expressed in terms of the regularized upper + incomplete gamma function `gammaincc` as + ``gammaincc(v / 2, x / 2)``. [1]_ + + Parameters + ---------- + v : array_like + Degrees of freedom. + x : array_like + Lower bound of the integral. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the survival function. + + See Also + -------- + chdtr, chdtri, chdtriv, gammaincc + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It can be expressed in terms of the regularized upper incomplete + gamma function. + + >>> v = 1 + >>> x = np.arange(4) + >>> sc.chdtrc(v, x) + array([1. , 0.31731051, 0.15729921, 0.08326452]) + >>> sc.gammaincc(v / 2, x / 2) + array([1. , 0.31731051, 0.15729921, 0.08326452]) + + """) + +add_newdoc("chdtri", + """ + chdtri(v, p, out=None) + + Inverse to `chdtrc` with respect to `x`. + + Returns `x` such that ``chdtrc(v, x) == p``. + + Parameters + ---------- + v : array_like + Degrees of freedom. + p : array_like + Probability. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + x : scalar or ndarray + Value so that the probability a Chi square random variable + with `v` degrees of freedom is greater than `x` equals `p`. + + See Also + -------- + chdtrc, chdtr, chdtriv + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import scipy.special as sc + + It inverts `chdtrc`. 
+
+    >>> v, p = 1, 0.3
+    >>> sc.chdtrc(v, sc.chdtri(v, p))
+    0.3
+    >>> x = 1
+    >>> sc.chdtri(v, sc.chdtrc(v, x))
+    1.0
+
+    """)
+
+add_newdoc("chdtriv",
+    """
+    chdtriv(p, x, out=None)
+
+    Inverse to `chdtr` with respect to `v`.
+
+    Returns `v` such that ``chdtr(v, x) == p``.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability that the Chi square random variable is less than
+        or equal to `x`.
+    x : array_like
+        Nonnegative input.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Degrees of freedom.
+
+    See Also
+    --------
+    chdtr, chdtrc, chdtri
+
+    References
+    ----------
+    .. [1] Chi-Square distribution,
+           https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It inverts `chdtr`.
+
+    >>> p, x = 0.5, 1
+    >>> sc.chdtr(sc.chdtriv(p, x), x)
+    0.5000000000202172
+    >>> v = 1
+    >>> sc.chdtriv(sc.chdtr(v, x), v)
+    1.0000000000000013
+
+    """)
+
+add_newdoc("chndtr",
+    r"""
+    chndtr(x, df, nc, out=None)
+
+    Non-central chi square cumulative distribution function
+
+    The cumulative distribution function is given by:
+
+    .. math::
+
+        P(\chi^{\prime 2} \vert \nu, \lambda) =\sum_{j=0}^{\infty}
+        e^{-\lambda /2}
+        \frac{(\lambda /2)^j}{j!} P(\chi^{\prime 2} \vert \nu + 2j),
+
+    where :math:`\nu > 0` is the degrees of freedom (``df``) and
+    :math:`\lambda \geq 0` is the non-centrality parameter (``nc``).
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value of the non-central chi square cumulative distribution function.
+
+    See Also
+    --------
+    chndtrix, chndtridf, chndtrinc
+
+    """)
+
+add_newdoc("chndtrix",
+    """
+    chndtrix(p, df, nc, out=None)
+
+    Inverse to `chndtr` vs `x`
+
+    Calculated using a search to find a value for `x` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value so that the probability a non-central Chi square random variable
+        with `df` degrees of freedom and non-centrality, `nc`, is less than or
+        equal to `x` equals `p`.
+
+    See Also
+    --------
+    chndtr, chndtridf, chndtrinc
+
+    """)
+
+add_newdoc("chndtridf",
+    """
+    chndtridf(x, p, nc, out=None)
+
+    Inverse to `chndtr` vs `df`
+
+    Calculated using a search to find a value for `df` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    df : scalar or ndarray
+        Degrees of freedom
+
+    See Also
+    --------
+    chndtr, chndtrix, chndtrinc
+
+    """)
+
+add_newdoc("chndtrinc",
+    """
+    chndtrinc(x, df, p, out=None)
+
+    Inverse to `chndtr` vs `nc`
+
+    Calculated using a search to find a value for `nc` that produces the
+    desired value of `p`.
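+
+    The result can be checked by substituting it back into `chndtr`; a small
+    round-trip sketch (the chosen numbers are illustrative only):
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x, df, p = 25.0, 10.0, 0.5
+    >>> nc = sc.chndtrinc(x, df, p)
+    >>> np.allclose(sc.chndtr(x, df, nc), p)
+    True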
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Non-centrality
+
+    See Also
+    --------
+    chndtr, chndtrix, chndtridf
+
+    """)
+
+add_newdoc("cosdg",
+    """
+    cosdg(x, out=None)
+
+    Cosine of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Cosine of the input.
+
+    See Also
+    --------
+    sindg, tandg, cotdg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using cosine directly.
+
+    >>> x = 90 + 180 * np.arange(3)
+    >>> sc.cosdg(x)
+    array([-0.,  0., -0.])
+    >>> np.cos(x * np.pi / 180)
+    array([ 6.1232340e-17, -1.8369702e-16,  3.0616170e-16])
+
+    """)
+
+add_newdoc("cosm1",
+    """
+    cosm1(x, out=None)
+
+    cos(x) - 1 for use when `x` is near zero.
+
+    Parameters
+    ----------
+    x : array_like
+        Real valued argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of ``cos(x) - 1``.
+
+    See Also
+    --------
+    expm1, log1p
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than computing ``cos(x) - 1`` directly for
+    ``x`` around 0.
+
+    >>> x = 1e-30
+    >>> np.cos(x) - 1
+    0.0
+    >>> sc.cosm1(x)
+    -5.0000000000000005e-61
+
+    """)
+
+add_newdoc("cotdg",
+    """
+    cotdg(x, out=None)
+
+    Cotangent of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Cotangent at the input.
+
+    See Also
+    --------
+    sindg, cosdg, tandg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using cotangent directly.
+
+    >>> x = 90 + 180 * np.arange(3)
+    >>> sc.cotdg(x)
+    array([0., 0., 0.])
+    >>> 1 / np.tan(x * np.pi / 180)
+    array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])
+
+    """)
+
+add_newdoc("dawsn",
+    """
+    dawsn(x, out=None)
+
+    Dawson's integral.
+
+    Computes::
+
+        exp(-x**2) * integral(exp(t**2), t=0..x).
+
+    Parameters
+    ----------
+    x : array_like
+        Function parameter.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the integral.
+
+    See Also
+    --------
+    wofz, erf, erfc, erfcx, erfi
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-15, 15, num=1000)
+    >>> plt.plot(x, special.dawsn(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$dawsn(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("ellipe",
+    r"""
+    ellipe(m, out=None)
+
+    Complete elliptic integral of the second kind
+
+    This function is defined as
+
+    .. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
+
+    Parameters
+    ----------
+    m : array_like
+        Defines the parameter of the elliptic integral.
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + E : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprd : Symmetric elliptic integral of the second kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpe`. + + For `m > 0` the computation uses the approximation, + + .. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m), + + where :math:`P` and :math:`Q` are tenth-order polynomials. For + `m < 0`, the relation + + .. math:: E(m) = E(m/(m - 1)) \sqrt(1-m) + + is used. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre E integral is related to Carlson's symmetric R_D or R_G + functions in multiple ways [3]_. For example, + + .. math:: E(m) = 2 R_G(0, 1-k^2, 1) . + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [3] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + + Examples + -------- + This function is used in finding the circumference of an + ellipse with semi-major axis `a` and semi-minor axis `b`. + + >>> import numpy as np + >>> from scipy import special + + >>> a = 3.5 + >>> b = 2.1 + >>> e_sq = 1.0 - b**2/a**2 # eccentricity squared + + Then the circumference is found using the following: + + >>> C = 4*a*special.ellipe(e_sq) # circumference formula + >>> C + 17.868899204378693 + + When `a` and `b` are the same (meaning eccentricity is 0), + this reduces to the circumference of a circle. + + >>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b + 21.991148575128552 + >>> 2*np.pi*a # formula for circle of radius a + 21.991148575128552 + + """) + +add_newdoc("ellipeinc", + r""" + ellipeinc(phi, m, out=None) + + Incomplete elliptic integral of the second kind + + This function is defined as + + .. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt + + Parameters + ---------- + phi : array_like + amplitude of the elliptic integral. + m : array_like + parameter of the elliptic integral. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + E : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + elliprd : Symmetric elliptic integral of the second kind. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellie`. 
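+
+    At ``phi = pi/2`` the incomplete integral reduces to the complete
+    integral `ellipe`; a quick numerical check of that special case:
+
+    >>> import numpy as np
+    >>> from scipy.special import ellipeinc, ellipe
+    >>> m = 0.5
+    >>> np.allclose(ellipeinc(np.pi / 2, m), ellipe(m))
+    True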
+ + Computation uses arithmetic-geometric means algorithm. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre E incomplete integral can be related to combinations + of Carlson's symmetric integrals R_D, R_F, and R_G in multiple + ways [3]_. For example, with :math:`c = \csc^2\phi`, + + .. math:: + E(\phi, m) = R_F(c-1, c-k^2, c) + - \frac{1}{3} k^2 R_D(c-1, c-k^2, c) . + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [3] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + """) + +add_newdoc("ellipj", + """ + ellipj(u, m, out=None) + + Jacobian elliptic functions + + Calculates the Jacobian elliptic functions of parameter `m` between + 0 and 1, and real argument `u`. + + Parameters + ---------- + m : array_like + Parameter. + u : array_like + Argument. + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + sn, cn, dn, ph : 4-tuple of scalar or ndarray + The returned functions:: + + sn(u|m), cn(u|m), dn(u|m) + + The value `ph` is such that if `u = ellipkinc(ph, m)`, + then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`. + + See Also + -------- + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpj`. + + These functions are periodic, with quarter-period on the real axis + equal to the complete elliptic integral `ellipk(m)`. + + Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then + `sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called + the amplitude of `u`. + + Computation is by means of the arithmetic-geometric mean algorithm, + except when `m` is within 1e-9 of 0 or 1. In the latter case with `m` + close to 1, the approximation applies only for `phi < pi/2`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("ellipkm1", + """ + ellipkm1(p, out=None) + + Complete elliptic integral of the first kind around `m` = 1 + + This function is defined as + + .. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt + + where `m = 1 - p`. + + Parameters + ---------- + p : array_like + Defines the parameter of the elliptic integral as `m = 1 - p`. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprf : Completely-symmetric elliptic integral of the first kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpk`. + + For `p <= 1`, computation uses the approximation, + + .. 
math:: K(p) \\approx P(p) - \\log(p) Q(p), + + where :math:`P` and :math:`Q` are tenth-order polynomials. The + argument `p` is used internally rather than `m` so that the logarithmic + singularity at `m = 1` will be shifted to the origin; this preserves + maximum accuracy. For `p > 1`, the identity + + .. math:: K(p) = K(1/p)/\\sqrt(p) + + is used. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("ellipk", + r""" + ellipk(m, out=None) + + Complete elliptic integral of the first kind. + + This function is defined as + + .. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt + + Parameters + ---------- + m : array_like + The parameter of the elliptic integral. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind around m = 1 + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprf : Completely-symmetric elliptic integral of the first kind. + + Notes + ----- + For more precision around point m = 1, use `ellipkm1`, which this + function calls. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [1]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre K integral is related to Carlson's symmetric R_F + function by [2]_: + + .. math:: K(m) = R_F(0, 1-k^2, 1) . + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [2] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + + """) + +add_newdoc("ellipkinc", + r""" + ellipkinc(phi, m, out=None) + + Incomplete elliptic integral of the first kind + + This function is defined as + + .. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt + + This function is also called :math:`F(\phi, m)`. + + Parameters + ---------- + phi : array_like + amplitude of the elliptic integral + m : array_like + parameter of the elliptic integral + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the elliptic integral + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprf : Completely-symmetric elliptic integral of the first kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellik`. The computation is + carried out using the arithmetic-geometric mean algorithm. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. 
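+
+    In particular, ``ellipkinc(np.pi / 2, m)`` reduces to the complete
+    integral `ellipk`; a short sanity check:
+
+    >>> import numpy as np
+    >>> from scipy.special import ellipkinc, ellipk
+    >>> m = 0.5
+    >>> np.allclose(ellipkinc(np.pi / 2, m), ellipk(m))
+    True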
+ + The Legendre K incomplete integral (or F integral) is related to + Carlson's symmetric R_F function [3]_. + Setting :math:`c = \csc^2\phi`, + + .. math:: F(\phi, m) = R_F(c-1, c-k^2, c) . + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [3] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + """) + +add_newdoc( + "elliprc", + r""" + elliprc(x, y, out=None) + + Degenerate symmetric elliptic integral. + + The function RC is defined as [1]_ + + .. math:: + + R_{\mathrm{C}}(x, y) = + \frac{1}{2} \int_0^{+\infty} (t + x)^{-1/2} (t + y)^{-1} dt + = R_{\mathrm{F}}(x, y, y) + + Parameters + ---------- + x, y : array_like + Real or complex input parameters. `x` can be any number in the + complex plane cut along the negative real axis. `y` must be non-zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If `y` is real and negative, the Cauchy + principal value is returned. If both of `x` and `y` are real, the + return value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprd : Symmetric elliptic integral of the second kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + elliprj : Symmetric elliptic integral of the third kind. + + Notes + ----- + RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) == + elliprf(x, y, y)``. It is an elementary function rather than an elliptic + integral. + + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order. [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E6 + .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprc + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> scale = 0.3 + 0.4j + >>> elliprc(scale*x, scale*y) + (0.5484493976710874-0.4169557678995833j) + + >>> elliprc(x, y)/np.sqrt(scale) + (0.5484493976710874-0.41695576789958333j) + + When the two arguments coincide, the integral is particularly + simple: + + >>> x = 1.2 + 3.4j + >>> elliprc(x, x) + (0.4299173120614631-0.3041729818745595j) + + >>> 1/np.sqrt(x) + (0.4299173120614631-0.30417298187455954j) + + Another simple case: the first argument vanishes: + + >>> y = 1.2 + 3.4j + >>> elliprc(0, y) + (0.6753125346116815-0.47779380263880866j) + + >>> np.pi/2/np.sqrt(y) + (0.6753125346116815-0.4777938026388088j) + + When `x` and `y` are both positive, we can express + :math:`R_C(x,y)` in terms of more elementary functions. For the + case :math:`0 \le x < y`, + + >>> x = 3.2 + >>> y = 6. + >>> elliprc(x, y) + 0.44942991498453444 + + >>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x) + 0.44942991498453433 + + And for the case :math:`0 \le y < x`, + + >>> x = 6. 
+ >>> y = 3.2 + >>> elliprc(x,y) + 0.4989837501576147 + + >>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y) + 0.49898375015761476 + + """) + +add_newdoc( + "elliprd", + r""" + elliprd(x, y, z, out=None) + + Symmetric elliptic integral of the second kind. + + The function RD is defined as [1]_ + + .. math:: + + R_{\mathrm{D}}(x, y, z) = + \frac{3}{2} \int_0^{+\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2} + dt + + Parameters + ---------- + x, y, z : array_like + Real or complex input parameters. `x` or `y` can be any number in the + complex plane cut along the negative real axis, but at most one of them + can be zero, while `z` must be non-zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, and `z` are real, the + return value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprc : Degenerate symmetric elliptic integral. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + elliprj : Symmetric elliptic integral of the third kind. + + Notes + ----- + RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) == + elliprj(x, y, z, z)``. + + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order. [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E5 + .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprd + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> scale = 0.3 + 0.4j + >>> elliprd(scale*x, scale*y, scale*z) + (-0.03703043835680379-0.24500934665683802j) + + >>> elliprd(x, y, z)*np.power(scale, -1.5) + (-0.0370304383568038-0.24500934665683805j) + + All three arguments coincide: + + >>> x = 1.2 + 3.4j + >>> elliprd(x, x, x) + (-0.03986825876151896-0.14051741840449586j) + + >>> np.power(x, -1.5) + (-0.03986825876151894-0.14051741840449583j) + + The so-called "second lemniscate constant": + + >>> elliprd(0, 2, 1)/3 + 0.5990701173677961 + + >>> from scipy.special import gamma + >>> gamma(0.75)**2/np.sqrt(2*np.pi) + 0.5990701173677959 + + """) + +add_newdoc( + "elliprf", + r""" + elliprf(x, y, z, out=None) + + Completely-symmetric elliptic integral of the first kind. + + The function RF is defined as [1]_ + + .. math:: + + R_{\mathrm{F}}(x, y, z) = + \frac{1}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt + + Parameters + ---------- + x, y, z : array_like + Real or complex input parameters. `x`, `y`, or `z` can be any number in + the complex plane cut along the negative real axis, but at most one of + them can be zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, and `z` are real, the return + value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprc : Degenerate symmetric integral. + elliprd : Symmetric elliptic integral of the second kind. 
+ elliprg : Completely-symmetric elliptic integral of the second kind. + elliprj : Symmetric elliptic integral of the third kind. + + Notes + ----- + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order (cf.: + https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete + integral. [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E1 + .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprf + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> scale = 0.3 + 0.4j + >>> elliprf(scale*x, scale*y, scale*z) + (0.5328051227278146-0.4008623567957094j) + + >>> elliprf(x, y, z)/np.sqrt(scale) + (0.5328051227278147-0.4008623567957095j) + + All three arguments coincide: + + >>> x = 1.2 + 3.4j + >>> elliprf(x, x, x) + (0.42991731206146316-0.30417298187455954j) + + >>> 1/np.sqrt(x) + (0.4299173120614631-0.30417298187455954j) + + The so-called "first lemniscate constant": + + >>> elliprf(0, 1, 2) + 1.3110287771460598 + + >>> from scipy.special import gamma + >>> gamma(0.25)**2/(4*np.sqrt(2*np.pi)) + 1.3110287771460598 + + """) + +add_newdoc( + "elliprg", + r""" + elliprg(x, y, z, out=None) + + Completely-symmetric elliptic integral of the second kind. + + The function RG is defined as [1]_ + + .. math:: + + R_{\mathrm{G}}(x, y, z) = + \frac{1}{4} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} + \left(\frac{x}{t + x} + \frac{y}{t + y} + \frac{z}{t + z}\right) t + dt + + Parameters + ---------- + x, y, z : array_like + Real or complex input parameters. `x`, `y`, or `z` can be any number in + the complex plane cut along the negative real axis. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, and `z` are real, the return + value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprc : Degenerate symmetric integral. + elliprd : Symmetric elliptic integral of the second kind. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprj : Symmetric elliptic integral of the third kind. + + Notes + ----- + The implementation uses the relation [1]_ + + .. math:: + + 2 R_{\mathrm{G}}(x, y, z) = + z R_{\mathrm{F}}(x, y, z) - + \frac{1}{3} (x - z) (y - z) R_{\mathrm{D}}(x, y, z) + + \sqrt{\frac{x y}{z}} + + and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can + be chosen as the pivot. When one of the arguments is close to zero, the AGM + method is applied instead. Other special cases are computed following Ref. + [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + .. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. 
+ https://dlmf.nist.gov/19.16.E1 + https://dlmf.nist.gov/19.20.ii + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprg + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> scale = 0.3 + 0.4j + >>> elliprg(scale*x, scale*y, scale*z) + (1.195936862005246+0.8470988320464167j) + + >>> elliprg(x, y, z)*np.sqrt(scale) + (1.195936862005246+0.8470988320464165j) + + Simplifications: + + >>> elliprg(0, y, y) + 1.756203682760182 + + >>> 0.25*np.pi*np.sqrt(y) + 1.7562036827601817 + + >>> elliprg(0, 0, z) + 1.224744871391589 + + >>> 0.5*np.sqrt(z) + 1.224744871391589 + + The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and + ``c`` is given by + + .. math:: + + S = 4 \pi a b c R_{\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2). + + >>> def ellipsoid_area(a, b, c): + ... r = 4.0 * np.pi * a * b * c + ... return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c)) + >>> print(ellipsoid_area(1, 3, 5)) + 108.62688289491807 + """) + +add_newdoc( + "elliprj", + r""" + elliprj(x, y, z, p, out=None) + + Symmetric elliptic integral of the third kind. + + The function RJ is defined as [1]_ + + .. math:: + + R_{\mathrm{J}}(x, y, z, p) = + \frac{3}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} + (t + p)^{-1} dt + + .. warning:: + This function should be considered experimental when the inputs are + unbalanced. Check correctness with another independent implementation. + + Parameters + ---------- + x, y, z, p : array_like + Real or complex input parameters. `x`, `y`, or `z` are numbers in + the complex plane cut along the negative real axis (subject to further + constraints, see Notes), and at most one of them can be zero. `p` must + be non-zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the + return value is real. Otherwise, the return value is complex. + + If `p` is real and negative, while `x`, `y`, and `z` are real, + non-negative, and at most one of them is zero, the Cauchy principal + value is returned. [1]_ [2]_ + + See Also + -------- + elliprc : Degenerate symmetric integral. + elliprd : Symmetric elliptic integral of the second kind. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + + Notes + ----- + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order. [3]_ The algorithm is slightly + different from its earlier incarnation as it appears in [1]_, in that the + call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in + the inner loop. Asymptotic approximations are used where arguments differ + widely in the order of magnitude. [5]_ + + The input values are subject to certain sufficient but not necessary + constraints when input arguments are complex. Notably, ``x``, ``y``, and + ``z`` must have non-negative real parts, unless two of them are + non-negative and complex-conjugates to each other while the other is a real + non-negative number. [1]_ If the inputs do not satisfy the sufficient + condition described in Ref. [1]_ they are rejected outright with the output + set to NaN. + + In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the + function ``elliprd`` should be preferred because of its less restrictive + domain. + + .. versionadded:: 1.8.0 + + References + ---------- + .. 
[1] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + .. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.20.iii + .. [3] B. C. Carlson, J. FitzSimmons, "Reduction Theorems for Elliptic + Integrands with the Square Root of Two Quadratic Factors," J. + Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000. + https://doi.org/10.1016/S0377-0427(00)00282-X + .. [4] F. Johansson, "Numerical Evaluation of Elliptic Functions, Elliptic + Integrals and Modular Forms," in J. Blumlein, C. Schneider, P. + Paule, eds., "Elliptic Integrals, Elliptic Functions and Modular + Forms in Quantum Field Theory," pp. 269-293, 2019 (Cham, + Switzerland: Springer Nature Switzerland) + https://arxiv.org/abs/1806.06725 + https://doi.org/10.1007/978-3-030-04480-0 + .. [5] B. C. Carlson, J. L. Gustafson, "Asymptotic Approximations for + Symmetric Elliptic Integrals," SIAM J. Math. Anls., vol. 25, no. 2, + pp. 288-303, 1994. + https://arxiv.org/abs/math/9310223 + https://doi.org/10.1137/S0036141092228477 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprj + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> p = 7. + >>> scale = 0.3 - 0.4j + >>> elliprj(scale*x, scale*y, scale*z, scale*p) + (0.10834905565679157+0.19694950747103812j) + + >>> elliprj(x, y, z, p)*np.power(scale, -1.5) + (0.10834905565679556+0.19694950747103854j) + + Reduction to simpler elliptic integral: + + >>> elliprj(x, y, z, z) + (0.08288462362195129-0.028376809745123258j) + + >>> from scipy.special import elliprd + >>> elliprd(x, y, z) + (0.08288462362195136-0.028376809745123296j) + + All arguments coincide: + + >>> elliprj(x, x, x, x) + (-0.03986825876151896-0.14051741840449586j) + + >>> np.power(x, -1.5) + (-0.03986825876151894-0.14051741840449583j) + + """) + +add_newdoc("entr", + r""" + entr(x, out=None) + + Elementwise function for computing entropy. + + .. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 + \\ -\infty & \text{otherwise} \end{cases} + + Parameters + ---------- + x : ndarray + Input array. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + res : scalar or ndarray + The value of the elementwise entropy function at the given points `x`. + + See Also + -------- + kl_div, rel_entr, scipy.stats.entropy + + Notes + ----- + .. versionadded:: 0.15.0 + + This function is concave. + + The origin of this function is in convex programming; see [1]_. + Given a probability distribution :math:`p_1, \ldots, p_n`, + the definition of entropy in the context of *information theory* is + + .. math:: + + \sum_{i = 1}^n \mathrm{entr}(p_i). + + To compute the latter quantity, use `scipy.stats.entropy`. + + References + ---------- + .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*. + Cambridge University Press, 2004. + :doi:`https://doi.org/10.1017/CBO9780511804441` + + """) + +add_newdoc("erf", + """ + erf(z, out=None) + + Returns the error function of complex argument. + + It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``. + + Parameters + ---------- + x : ndarray + Input array. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + res : scalar or ndarray + The values of the error function at the given points `x`. + + See Also + -------- + erfc, erfinv, erfcinv, wofz, erfcx, erfi + + Notes + ----- + The cumulative of the unit normal distribution is given by + ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Error_function + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, + 1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm + .. [3] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erf(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erf(x)$') + >>> plt.show() + + """) + +add_newdoc("erfc", + """ + erfc(x, out=None) + + Complementary error function, ``1 - erf(x)``. + + Parameters + ---------- + x : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the complementary error function + + See Also + -------- + erf, erfi, erfcx, dawsn, wofz + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfc(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfc(x)$') + >>> plt.show() + + """) + +add_newdoc("erfi", + """ + erfi(z, out=None) + + Imaginary error function, ``-i erf(i z)``. + + Parameters + ---------- + z : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the imaginary error function + + See Also + -------- + erf, erfc, erfcx, dawsn, wofz + + Notes + ----- + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfi(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfi(x)$') + >>> plt.show() + + """) + +add_newdoc("erfcx", + """ + erfcx(x, out=None) + + Scaled complementary error function, ``exp(x**2) * erfc(x)``. + + Parameters + ---------- + x : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the scaled complementary error function + + + See Also + -------- + erf, erfc, erfi, dawsn, wofz + + Notes + ----- + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. 
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3)
+    >>> plt.plot(x, special.erfcx(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$erfcx(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc(
+    "erfinv",
+    """
+    erfinv(y, out=None)
+
+    Inverse of the error function.
+
+    Computes the inverse of the error function.
+
+    In the complex domain, there is no unique complex number w satisfying
+    erf(w)=z. This indicates a true inverse function would be multivalued.
+    When the domain is restricted to the real line, -1 < x < 1, there is a
+    unique real number satisfying erf(erfinv(x)) = x.
+
+    Parameters
+    ----------
+    y : ndarray
+        Argument at which to evaluate. Domain: [-1, 1]
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    erfinv : scalar or ndarray
+        The inverse of erf of y, element-wise
+
+    See Also
+    --------
+    erf : Error function of a complex argument
+    erfc : Complementary error function, ``1 - erf(x)``
+    erfcinv : Inverse of the complementary error function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import erfinv, erf
+
+    >>> erfinv(0.5)
+    0.4769362762044699
+
+    >>> y = np.linspace(-1.0, 1.0, num=9)
+    >>> x = erfinv(y)
+    >>> x
+    array([       -inf, -0.81341985, -0.47693628, -0.22531206,  0.        ,
+            0.22531206,  0.47693628,  0.81341985,         inf])
+
+    Verify that ``erf(erfinv(y))`` is ``y``.
+
+    >>> erf(x)
+    array([-1.  , -0.75, -0.5 , -0.25,  0.  ,  0.25,  0.5 ,  0.75,  1.  ])
+
+    Plot the function:
+
+    >>> y = np.linspace(-1, 1, 200)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(y, erfinv(y))
+    >>> ax.grid(True)
+    >>> ax.set_xlabel('y')
+    >>> ax.set_title('erfinv(y)')
+    >>> plt.show()
+
+    """)
+
+add_newdoc(
+    "erfcinv",
+    """
+    erfcinv(y, out=None)
+
+    Inverse of the complementary error function.
+
+    Computes the inverse of the complementary error function.
+
+    In the complex domain, there is no unique complex number w satisfying
+    erfc(w)=z. This indicates a true inverse function would be multivalued.
+    When the domain is restricted to the real line, 0 < x < 2, there is a
+    unique real number satisfying erfc(erfcinv(x)) = x.
+
+    It is related to the inverse of the error function by
+    erfcinv(1-x) = erfinv(x)
+
+    Parameters
+    ----------
+    y : ndarray
+        Argument at which to evaluate. Domain: [0, 2]
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    erfcinv : scalar or ndarray
+        The inverse of erfc of y, element-wise
+
+    See Also
+    --------
+    erf : Error function of a complex argument
+    erfc : Complementary error function, ``1 - erf(x)``
+    erfinv : Inverse of the error function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import erfcinv
+
+    >>> erfcinv(0.5)
+    0.4769362762044699
+
+    >>> y = np.linspace(0.0, 2.0, num=11)
+    >>> erfcinv(y)
+    array([        inf,  0.9061938 ,  0.59511608,  0.37080716,  0.17914345,
+           -0.        , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,
+                  -inf])
+
+    Plot the function:
+
+    >>> y = np.linspace(0, 2, 200)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(y, erfcinv(y))
+    >>> ax.grid(True)
+    >>> ax.set_xlabel('y')
+    >>> ax.set_title('erfcinv(y)')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("eval_jacobi",
+    r"""
+    eval_jacobi(n, alpha, beta, x, out=None)
+
+    Evaluate Jacobi polynomial at a point.
+
+    The Jacobi polynomials can be defined via the Gauss hypergeometric
+    function :math:`{}_2F_1` as
+
+    .. math::
+
+        P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
+        {}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
+
+    where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
+    :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.42 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    alpha : array_like
+        Parameter
+    beta : array_like
+        Parameter
+    x : array_like
+        Points at which to evaluate the polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    P : scalar or ndarray
+        Values of the Jacobi polynomial
+
+    See Also
+    --------
+    roots_jacobi : roots and quadrature weights of Jacobi polynomials
+    jacobi : Jacobi polynomial object
+    hyp2f1 : Gauss hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("eval_sh_jacobi",
+    r"""
+    eval_sh_jacobi(n, p, q, x, out=None)
+
+    Evaluate shifted Jacobi polynomial at a point.
+
+    Defined by
+
+    .. math::
+
+        G_n^{(p, q)}(x)
+        = \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
+
+    where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi
+    polynomial. See 22.5.2 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `binom` and `eval_jacobi`.
+    p : float
+        Parameter
+    q : float
+        Parameter
+    x : float
+        Points at which to evaluate the shifted Jacobi polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    G : scalar or ndarray
+        Values of the shifted Jacobi polynomial.
+
+    See Also
+    --------
+    roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
+                      polynomials
+    sh_jacobi : shifted Jacobi polynomial object
+    eval_jacobi : evaluate Jacobi polynomials
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("eval_gegenbauer",
+    r"""
+    eval_gegenbauer(n, alpha, x, out=None)
+
+    Evaluate Gegenbauer polynomial at a point.
+
+    The Gegenbauer polynomials can be defined via the Gauss
+    hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
+        {}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.46 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    alpha : array_like
+        Parameter
+    x : array_like
+        Points at which to evaluate the Gegenbauer polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    C : scalar or ndarray
+        Values of the Gegenbauer polynomial
+
+    See Also
+    --------
+    roots_gegenbauer : roots and quadrature weights of Gegenbauer
+                       polynomials
+    gegenbauer : Gegenbauer polynomial object
+    hyp2f1 : Gauss hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("eval_chebyt",
+    r"""
+    eval_chebyt(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the first kind at a point.
+
+    The Chebyshev polynomials of the first kind can be defined via the
+    Gauss hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.47 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    T : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyt : roots and quadrature weights of Chebyshev
+                   polynomials of the first kind
+    chebyt : Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+    hyp2f1 : Gauss hypergeometric function
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+
+    Notes
+    -----
+    This routine is numerically stable for `x` in ``[-1, 1]`` at least
+    up to order ``10000``.
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("eval_chebyu",
+    r"""
+    eval_chebyu(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the second kind at a point.
+
+    The Chebyshev polynomials of the second kind can be defined via
+    the Gauss hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.48 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    U : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyu : roots and quadrature weights of Chebyshev
+                   polynomials of the second kind
+    chebyu : Chebyshev polynomial object
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+    hyp2f1 : Gauss hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("eval_chebys",
+    r"""
+    eval_chebys(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        S_n(x) = U_n(x/2)
+
+    where :math:`U_n` is a Chebyshev polynomial of the second
+    kind. See 22.5.13 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyu`.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    S : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebys : roots and quadrature weights of Chebyshev
+                   polynomials of the second kind on [-2, 2]
+    chebys : Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    They are a scaled version of the Chebyshev polynomials of the
+    second kind.
+
+    >>> x = np.linspace(-2, 2, 6)
+    >>> sc.eval_chebys(3, x)
+    array([-4.   ,  0.672,  0.736, -0.736, -0.672,  4.   ])
+    >>> sc.eval_chebyu(3, x / 2)
+    array([-4.   ,  0.672,  0.736, -0.736, -0.672,  4.   ])
+
+    """)
+
+add_newdoc("eval_chebyc",
+    r"""
+    eval_chebyc(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        C_n(x) = 2 T_n(x/2)
+
+    where :math:`T_n` is a Chebyshev polynomial of the first kind. See
+    22.5.11 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyt`.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    C : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyc : roots and quadrature weights of Chebyshev
+                   polynomials of the first kind on [-2, 2]
+    chebyc : Chebyshev polynomial object
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    They are a scaled version of the Chebyshev polynomials of the
+    first kind.
+
+    >>> x = np.linspace(-2, 2, 6)
+    >>> sc.eval_chebyc(3, x)
+    array([-2.   ,  1.872,  1.136, -1.136, -1.872,  2.   ])
+    >>> 2 * sc.eval_chebyt(3, x / 2)
+    array([-2.   ,  1.872,  1.136, -1.136, -1.872,  2.   ])
+
+    """)
+
+add_newdoc("eval_sh_chebyt",
+    r"""
+    eval_sh_chebyt(n, x, out=None)
+
+    Evaluate shifted Chebyshev polynomial of the first kind at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        T_n^*(x) = T_n(2x - 1)
+
+    where :math:`T_n` is a Chebyshev polynomial of the first kind. See
+    22.5.14 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyt`.
+
+    """)
+
+add_newdoc("eval_sh_chebyu",
+    r"""
+    eval_sh_chebyu(n, x, out=None)
+
+    Evaluate shifted Chebyshev polynomial of the second kind at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        U_n^*(x) = U_n(2x - 1)
+
+    where :math:`U_n` is a Chebyshev polynomial of the second kind.
+    See 22.5.15 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyu`.
+    x : array_like
+        Points at which to evaluate the shifted Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    U : scalar or ndarray
+        Values of the shifted Chebyshev polynomial
+
+    See Also
+    --------
+    roots_sh_chebyu : roots and quadrature weights of shifted
+                      Chebyshev polynomials of the second kind
+    sh_chebyu : shifted Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("eval_legendre",
+    r"""
+    eval_legendre(n, x, out=None)
+
+    Evaluate Legendre polynomial at a point.
+
+    The Legendre polynomials can be defined via the Gauss
+    hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.49 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Legendre polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    P : scalar or ndarray
+        Values of the Legendre polynomial
+
+    See Also
+    --------
+    roots_legendre : roots and quadrature weights of Legendre
+                     polynomials
+    legendre : Legendre polynomial object
+    hyp2f1 : Gauss hypergeometric function
+    numpy.polynomial.legendre.Legendre : Legendre series
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import eval_legendre
+
+    Evaluate the zero-order Legendre polynomial at x = 0
+
+    >>> eval_legendre(0, 0)
+    1.0
+
+    Evaluate the first-order Legendre polynomial between -1 and 1
+
+    >>> X = np.linspace(-1, 1, 5)  # Domain of Legendre polynomials
+    >>> eval_legendre(1, X)
+    array([-1. , -0.5,  0. ,  0.5,  1. 
]) + + Evaluate Legendre polynomials of order 0 through 4 at x = 0 + + >>> N = range(0, 5) + >>> eval_legendre(N, 0) + array([ 1. , 0. , -0.5 , 0. , 0.375]) + + Plot Legendre polynomials of order 0 through 4 + + >>> X = np.linspace(-1, 1) + + >>> import matplotlib.pyplot as plt + >>> for n in range(0, 5): + ... y = eval_legendre(n, X) + ... plt.plot(X, y, label=r'$P_{}(x)$'.format(n)) + + >>> plt.title("Legendre Polynomials") + >>> plt.xlabel("x") + >>> plt.ylabel(r'$P_n(x)$') + >>> plt.legend(loc='lower right') + >>> plt.show() + + """) + +add_newdoc("eval_sh_legendre", + r""" + eval_sh_legendre(n, x, out=None) + + Evaluate shifted Legendre polynomial at a point. + + These polynomials are defined as + + .. math:: + + P_n^*(x) = P_n(2x - 1) + + where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_ + for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the value is + determined via the relation to `eval_legendre`. + x : array_like + Points at which to evaluate the shifted Legendre polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + P : scalar or ndarray + Values of the shifted Legendre polynomial + + See Also + -------- + roots_sh_legendre : roots and quadrature weights of shifted + Legendre polynomials + sh_legendre : shifted Legendre polynomial object + eval_legendre : evaluate Legendre polynomials + numpy.polynomial.legendre.Legendre : Legendre series + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_genlaguerre", + r""" + eval_genlaguerre(n, alpha, x, out=None) + + Evaluate generalized Laguerre polynomial at a point. + + The generalized Laguerre polynomials can be defined via the + confluent hypergeometric function :math:`{}_1F_1` as + + .. math:: + + L_n^{(\alpha)}(x) = \binom{n + \alpha}{n} + {}_1F_1(-n, \alpha + 1, x). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre + polynomials are the special case where :math:`\alpha = 0`. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to the confluent hypergeometric + function. + alpha : array_like + Parameter; must have ``alpha > -1`` + x : array_like + Points at which to evaluate the generalized Laguerre + polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + L : scalar or ndarray + Values of the generalized Laguerre polynomial + + See Also + -------- + roots_genlaguerre : roots and quadrature weights of generalized + Laguerre polynomials + genlaguerre : generalized Laguerre polynomial object + hyp1f1 : confluent hypergeometric function + eval_laguerre : evaluate Laguerre polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_laguerre", + r""" + eval_laguerre(n, x, out=None) + + Evaluate Laguerre polynomial at a point. + + The Laguerre polynomials can be defined via the confluent + hypergeometric function :math:`{}_1F_1` as + + .. math:: + + L_n(x) = {}_1F_1(-n, 1, x). + + See 22.5.16 and 22.5.54 in [AS]_ for details. 
When :math:`n` is an + integer the result is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer the result is + determined via the relation to the confluent hypergeometric + function. + x : array_like + Points at which to evaluate the Laguerre polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + L : scalar or ndarray + Values of the Laguerre polynomial + + See Also + -------- + roots_laguerre : roots and quadrature weights of Laguerre + polynomials + laguerre : Laguerre polynomial object + numpy.polynomial.laguerre.Laguerre : Laguerre series + eval_genlaguerre : evaluate generalized Laguerre polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_hermite", + r""" + eval_hermite(n, x, out=None) + + Evaluate physicist's Hermite polynomial at a point. + + Defined by + + .. math:: + + H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2}; + + :math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in + [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial + x : array_like + Points at which to evaluate the Hermite polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + H : scalar or ndarray + Values of the Hermite polynomial + + See Also + -------- + roots_hermite : roots and quadrature weights of physicist's + Hermite polynomials + hermite : physicist's Hermite polynomial object + numpy.polynomial.hermite.Hermite : Physicist's Hermite series + eval_hermitenorm : evaluate Probabilist's Hermite polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_hermitenorm", + r""" + eval_hermitenorm(n, x, out=None) + + Evaluate probabilist's (normalized) Hermite polynomial at a + point. + + Defined by + + .. math:: + + He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2}; + + :math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in + [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial + x : array_like + Points at which to evaluate the Hermite polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + He : scalar or ndarray + Values of the Hermite polynomial + + See Also + -------- + roots_hermitenorm : roots and quadrature weights of probabilist's + Hermite polynomials + hermitenorm : probabilist's Hermite polynomial object + numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series + eval_hermite : evaluate physicist's Hermite polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + + +add_newdoc("exp10", + """ + exp10(x, out=None) + + Compute ``10**x`` element-wise. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``10**x``, computed element-wise. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.special import exp10 + + >>> exp10(3) + 1000.0 + >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) + >>> exp10(x) + array([[ 0.1 , 0.31622777, 1. ], + [ 3.16227766, 10. , 31.6227766 ]]) + + """) + +add_newdoc("exp2", + """ + exp2(x, out=None) + + Compute ``2**x`` element-wise. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``2**x``, computed element-wise. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import exp2 + + >>> exp2(3) + 8.0 + >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) + >>> exp2(x) + array([[ 0.5 , 0.70710678, 1. ], + [ 1.41421356, 2. , 2.82842712]]) + """) + +add_newdoc("expm1", + """ + expm1(x, out=None) + + Compute ``exp(x) - 1``. + + When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation + of ``exp(x) - 1`` can suffer from catastrophic loss of precision. + ``expm1(x)`` is implemented to avoid the loss of precision that occurs when + `x` is near zero. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``exp(x) - 1`` computed element-wise. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import expm1 + + >>> expm1(1.0) + 1.7182818284590451 + >>> expm1([-0.2, -0.1, 0, 0.1, 0.2]) + array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276]) + + The exact value of ``exp(7.5e-13) - 1`` is:: + + 7.5000000000028125000000007031250000001318...*10**-13. + + Here is what ``expm1(7.5e-13)`` gives: + + >>> expm1(7.5e-13) + 7.5000000000028135e-13 + + Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in + a "catastrophic" loss of precision: + + >>> np.exp(7.5e-13) - 1 + 7.5006667543675576e-13 + + """) + +add_newdoc("expn", + r""" + expn(n, x, out=None) + + Generalized exponential integral En. + + For integer :math:`n \geq 0` and real :math:`x \geq 0` the + generalized exponential integral is defined as [dlmf]_ + + .. math:: + + E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt. + + Parameters + ---------- + n : array_like + Non-negative integers + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the generalized exponential integral + + See Also + -------- + exp1 : special case of :math:`E_n` for :math:`n = 1` + expi : related to :math:`E_n` when :math:`n = 1` + + References + ---------- + .. [dlmf] Digital Library of Mathematical Functions, 8.19.2 + https://dlmf.nist.gov/8.19#E2 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + Its domain is nonnegative n and x. + + >>> sc.expn(-1, 1.0), sc.expn(1, -1.0) + (nan, nan) + + It has a pole at ``x = 0`` for ``n = 1, 2``; for larger ``n`` it + is equal to ``1 / (n - 1)``. + + >>> sc.expn([0, 1, 2, 3, 4], 0) + array([ inf, inf, 1. , 0.5 , 0.33333333]) + + For n equal to 0 it reduces to ``exp(-x) / x``. + + >>> x = np.array([1, 2, 3, 4]) + >>> sc.expn(0, x) + array([0.36787944, 0.06766764, 0.01659569, 0.00457891]) + >>> np.exp(-x) / x + array([0.36787944, 0.06766764, 0.01659569, 0.00457891]) + + For n equal to 1 it reduces to `exp1`. 
+ + >>> sc.expn(1, x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + >>> sc.exp1(x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + + """) + +add_newdoc("fdtr", + r""" + fdtr(dfn, dfd, x, out=None) + + F cumulative distribution function. + + Returns the value of the cumulative distribution function of the + F-distribution, also known as Snedecor's F-distribution or the + Fisher-Snedecor distribution. + + The F-distribution with parameters :math:`d_n` and :math:`d_d` is the + distribution of the random variable, + + .. math:: + X = \frac{U_n/d_n}{U_d/d_d}, + + where :math:`U_n` and :math:`U_d` are random variables distributed + :math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom, + respectively. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`. + + See Also + -------- + fdtrc : F distribution survival function + fdtri : F distribution inverse cumulative distribution + scipy.stats.f : F distribution + + Notes + ----- + The regularized incomplete beta function is used, according to the + formula, + + .. math:: + F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2). + + Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also + available as `scipy.stats.f`. Calling `fdtr` directly can improve + performance compared to the ``cdf`` method of `scipy.stats.f` (see last + example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``. + + >>> import numpy as np + >>> from scipy.special import fdtr + >>> fdtr(1, 2, 1) + 0.5773502691896258 + + Calculate the function at several points by providing a NumPy array for + `x`. + + >>> x = np.array([0.5, 2., 3.]) + >>> fdtr(1, 2, x) + array([0.4472136 , 0.70710678, 0.77459667]) + + Plot the function for several parameter sets. + + >>> import matplotlib.pyplot as plt + >>> dfn_parameters = [1, 5, 10, 50] + >>> dfd_parameters = [1, 1, 2, 3] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(dfn_parameters, dfd_parameters, + ... linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... dfn, dfd, style = parameter_set + ... fdtr_vals = fdtr(dfn, dfd, x) + ... ax.plot(x, fdtr_vals, label=rf"$d_n={dfn},\, d_d={dfd}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("F distribution cumulative distribution function") + >>> plt.show() + + The F distribution is also available as `scipy.stats.f`. Using `fdtr` + directly can be much faster than calling the ``cdf`` method of + `scipy.stats.f`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``. + + >>> from scipy.stats import f + >>> dfn, dfd = 1, 2 + >>> x = 1 + >>> fdtr_res = fdtr(dfn, dfd, x) # this will often be faster than below + >>> f_dist_res = f(dfn, dfd).cdf(x) + >>> fdtr_res == f_dist_res # test that results are equal + True + """) + +add_newdoc("fdtrc", + r""" + fdtrc(dfn, dfd, x, out=None) + + F survival function. 
+ + Returns the complemented F-distribution function (the integral of the + density from `x` to infinity). + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + The complemented F-distribution function with parameters `dfn` and + `dfd` at `x`. + + See Also + -------- + fdtr : F distribution cumulative distribution function + fdtri : F distribution inverse cumulative distribution function + scipy.stats.f : F distribution + + Notes + ----- + The regularized incomplete beta function is used, according to the + formula, + + .. math:: + F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2). + + Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also + available as `scipy.stats.f`. Calling `fdtrc` directly can improve + performance compared to the ``sf`` method of `scipy.stats.f` (see last + example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``. + + >>> import numpy as np + >>> from scipy.special import fdtrc + >>> fdtrc(1, 2, 1) + 0.42264973081037427 + + Calculate the function at several points by providing a NumPy array for + `x`. + + >>> x = np.array([0.5, 2., 3.]) + >>> fdtrc(1, 2, x) + array([0.5527864 , 0.29289322, 0.22540333]) + + Plot the function for several parameter sets. + + >>> import matplotlib.pyplot as plt + >>> dfn_parameters = [1, 5, 10, 50] + >>> dfd_parameters = [1, 1, 2, 3] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(dfn_parameters, dfd_parameters, + ... linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... dfn, dfd, style = parameter_set + ... fdtrc_vals = fdtrc(dfn, dfd, x) + ... ax.plot(x, fdtrc_vals, label=rf"$d_n={dfn},\, d_d={dfd}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("F distribution survival function") + >>> plt.show() + + The F distribution is also available as `scipy.stats.f`. Using `fdtrc` + directly can be much faster than calling the ``sf`` method of + `scipy.stats.f`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``. + + >>> from scipy.stats import f + >>> dfn, dfd = 1, 2 + >>> x = 1 + >>> fdtrc_res = fdtrc(dfn, dfd, x) # this will often be faster than below + >>> f_dist_res = f(dfn, dfd).sf(x) + >>> f_dist_res == fdtrc_res # test that results are equal + True + """) + +add_newdoc("fdtri", + r""" + fdtri(dfn, dfd, p, out=None) + + The `p`-th quantile of the F-distribution. + + This function is the inverse of the F-distribution CDF, `fdtr`, returning + the `x` such that `fdtr(dfn, dfd, x) = p`. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + p : array_like + Cumulative probability, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + The quantile corresponding to `p`. 
+ + See Also + -------- + fdtr : F distribution cumulative distribution function + fdtrc : F distribution survival function + scipy.stats.f : F distribution + + Notes + ----- + The computation is carried out using the relation to the inverse + regularized beta function, :math:`I^{-1}_x(a, b)`. Let + :math:`z = I^{-1}_p(d_d/2, d_n/2).` Then, + + .. math:: + x = \frac{d_d (1 - z)}{d_n z}. + + If `p` is such that :math:`x < 0.5`, the following relation is used + instead for improved stability: let + :math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then, + + .. math:: + x = \frac{d_d z'}{d_n (1 - z')}. + + Wrapper for the Cephes [1]_ routine `fdtri`. + + The F distribution is also available as `scipy.stats.f`. Calling + `fdtri` directly can improve performance compared to the ``ppf`` + method of `scipy.stats.f` (see last example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + `fdtri` represents the inverse of the F distribution CDF which is + available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2`` + at ``x=3``. `fdtri` then returns ``3`` given the same values for `df1`, + `df2` and the computed CDF value. + + >>> import numpy as np + >>> from scipy.special import fdtri, fdtr + >>> df1, df2 = 1, 2 + >>> x = 3 + >>> cdf_value = fdtr(df1, df2, x) + >>> fdtri(df1, df2, cdf_value) + 3.000000000000006 + + Calculate the function at several points by providing a NumPy array for + `x`. + + >>> x = np.array([0.1, 0.4, 0.7]) + >>> fdtri(1, 2, x) + array([0.02020202, 0.38095238, 1.92156863]) + + Plot the function for several parameter sets. + + >>> import matplotlib.pyplot as plt + >>> dfn_parameters = [50, 10, 1, 50] + >>> dfd_parameters = [0.5, 1, 1, 5] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(dfn_parameters, dfd_parameters, + ... linestyles)) + >>> x = np.linspace(0, 1, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... dfn, dfd, style = parameter_set + ... fdtri_vals = fdtri(dfn, dfd, x) + ... ax.plot(x, fdtri_vals, label=rf"$d_n={dfn},\, d_d={dfd}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> title = "F distribution inverse cumulative distribution function" + >>> ax.set_title(title) + >>> ax.set_ylim(0, 30) + >>> plt.show() + + The F distribution is also available as `scipy.stats.f`. Using `fdtri` + directly can be much faster than calling the ``ppf`` method of + `scipy.stats.f`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``. + + >>> from scipy.stats import f + >>> dfn, dfd = 1, 2 + >>> x = 0.7 + >>> fdtri_res = fdtri(dfn, dfd, x) # this will often be faster than below + >>> f_dist_res = f(dfn, dfd).ppf(x) + >>> f_dist_res == fdtri_res # test that results are equal + True + """) + +add_newdoc("fdtridfd", + """ + fdtridfd(dfn, p, x, out=None) + + Inverse to `fdtr` vs dfd + + Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + p : array_like + Cumulative probability, in [0, 1]. + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + dfd : scalar or ndarray + `dfd` such that ``fdtr(dfn, dfd, x) == p``. 
+ + See Also + -------- + fdtr : F distribution cumulative distribution function + fdtrc : F distribution survival function + fdtri : F distribution quantile function + scipy.stats.f : F distribution + + Examples + -------- + Compute the F distribution cumulative distribution function for one + parameter set. + + >>> from scipy.special import fdtridfd, fdtr + >>> dfn, dfd, x = 10, 5, 2 + >>> cdf_value = fdtr(dfn, dfd, x) + >>> cdf_value + 0.7700248806501017 + + Verify that `fdtridfd` recovers the original value for `dfd`: + + >>> fdtridfd(dfn, cdf_value, x) + 5.0 + """) + +''' +commented out as fdtridfn seems to have bugs and is not in functions.json +see: https://github.com/scipy/scipy/pull/15622#discussion_r811440983 + +add_newdoc( + "fdtridfn", + """ + fdtridfn(p, dfd, x, out=None) + + Inverse to `fdtr` vs dfn + + finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``. + + + Parameters + ---------- + p : array_like + Cumulative probability, in [0, 1]. + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + dfn : scalar or ndarray + `dfn` such that ``fdtr(dfn, dfd, x) == p``. + + See Also + -------- + fdtr, fdtrc, fdtri, fdtridfd + + + """) +''' + +add_newdoc("fresnel", + r""" + fresnel(z, out=None) + + Fresnel integrals. + + The Fresnel integrals are defined as + + .. math:: + + S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\ + C(z) &= \int_0^z \cos(\pi t^2 /2) dt. + + See [dlmf]_ for details. + + Parameters + ---------- + z : array_like + Real or complex valued argument + out : 2-tuple of ndarrays, optional + Optional output arrays for the function results + + Returns + ------- + S, C : 2-tuple of scalar or ndarray + Values of the Fresnel integrals + + See Also + -------- + fresnel_zeros : zeros of the Fresnel integrals + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/7.2#iii + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + As z goes to infinity along the real axis, S and C converge to 0.5. + + >>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf]) + >>> S + array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ]) + >>> C + array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ]) + + They are related to the error function `erf`. + + >>> z = np.array([1, 2, 3, 4]) + >>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z + >>> S, C = sc.fresnel(z) + >>> C + 1j*S + array([0.7798934 +0.43825915j, 0.48825341+0.34341568j, + 0.60572079+0.496313j , 0.49842603+0.42051575j]) + >>> 0.5 * (1 + 1j) * sc.erf(zeta) + array([0.7798934 +0.43825915j, 0.48825341+0.34341568j, + 0.60572079+0.496313j , 0.49842603+0.42051575j]) + + """) + +add_newdoc("gammainc", + r""" + gammainc(a, x, out=None) + + Regularized lower incomplete gamma function. + + It is defined as + + .. math:: + + P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt + + for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details. 
+ + Parameters + ---------- + a : array_like + Positive parameter + x : array_like + Nonnegative argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the lower incomplete gamma function + + See Also + -------- + gammaincc : regularized upper incomplete gamma function + gammaincinv : inverse of the regularized lower incomplete gamma function + gammainccinv : inverse of the regularized upper incomplete gamma function + + Notes + ----- + The function satisfies the relation ``gammainc(a, x) + + gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper + incomplete gamma function. + + The implementation largely follows that of [boost]_. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical functions + https://dlmf.nist.gov/8.2#E4 + .. [boost] Maddock et. al., "Incomplete Gamma Functions", + https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + + Examples + -------- + >>> import scipy.special as sc + + It is the CDF of the gamma distribution, so it starts at 0 and + monotonically increases to 1. + + >>> sc.gammainc(0.5, [0, 1, 10, 100]) + array([0. , 0.84270079, 0.99999226, 1. ]) + + It is equal to one minus the upper incomplete gamma function. + + >>> a, x = 0.5, 0.4 + >>> sc.gammainc(a, x) + 0.6289066304773024 + >>> 1 - sc.gammaincc(a, x) + 0.6289066304773024 + + """) + +add_newdoc("gammaincc", + r""" + gammaincc(a, x, out=None) + + Regularized upper incomplete gamma function. + + It is defined as + + .. math:: + + Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt + + for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details. + + Parameters + ---------- + a : array_like + Positive parameter + x : array_like + Nonnegative argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the upper incomplete gamma function + + See Also + -------- + gammainc : regularized lower incomplete gamma function + gammaincinv : inverse of the regularized lower incomplete gamma function + gammainccinv : inverse of the regularized upper incomplete gamma function + + Notes + ----- + The function satisfies the relation ``gammainc(a, x) + + gammaincc(a, x) = 1`` where `gammainc` is the regularized lower + incomplete gamma function. + + The implementation largely follows that of [boost]_. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical functions + https://dlmf.nist.gov/8.2#E4 + .. [boost] Maddock et. al., "Incomplete Gamma Functions", + https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + + Examples + -------- + >>> import scipy.special as sc + + It is the survival function of the gamma distribution, so it + starts at 1 and monotonically decreases to 0. + + >>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000]) + array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45, + 0.00000000e+00]) + + It is equal to one minus the lower incomplete gamma function. + + >>> a, x = 0.5, 0.4 + >>> sc.gammaincc(a, x) + 0.37109336952269756 + >>> 1 - sc.gammainc(a, x) + 0.37109336952269756 + + """) + +add_newdoc("gammainccinv", + """ + gammainccinv(a, y, out=None) + + Inverse of the regularized upper incomplete gamma function. + + Given an input :math:`y` between 0 and 1, returns :math:`x` such + that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper + incomplete gamma function; see `gammaincc`. 
This is well-defined + because the upper incomplete gamma function is monotonic as can + be seen from its definition in [dlmf]_. + + Parameters + ---------- + a : array_like + Positive parameter + y : array_like + Argument between 0 and 1, inclusive + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the inverse of the upper incomplete gamma function + + See Also + -------- + gammaincc : regularized upper incomplete gamma function + gammainc : regularized lower incomplete gamma function + gammaincinv : inverse of the regularized lower incomplete gamma function + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.2#E4 + + Examples + -------- + >>> import scipy.special as sc + + It starts at infinity and monotonically decreases to 0. + + >>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1]) + array([ inf, 1.35277173, 0.22746821, 0. ]) + + It inverts the upper incomplete gamma function. + + >>> a, x = 0.5, [0, 0.1, 0.5, 1] + >>> sc.gammaincc(a, sc.gammainccinv(a, x)) + array([0. , 0.1, 0.5, 1. ]) + + >>> a, x = 0.5, [0, 10, 50] + >>> sc.gammainccinv(a, sc.gammaincc(a, x)) + array([ 0., 10., 50.]) + + """) + +add_newdoc("gammaincinv", + """ + gammaincinv(a, y, out=None) + + Inverse to the regularized lower incomplete gamma function. + + Given an input :math:`y` between 0 and 1, returns :math:`x` such + that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower + incomplete gamma function; see `gammainc`. This is well-defined + because the lower incomplete gamma function is monotonic as can be + seen from its definition in [dlmf]_. + + Parameters + ---------- + a : array_like + Positive parameter + y : array_like + Parameter between 0 and 1, inclusive + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the inverse of the lower incomplete gamma function + + See Also + -------- + gammainc : regularized lower incomplete gamma function + gammaincc : regularized upper incomplete gamma function + gammainccinv : inverse of the regularized upper incomplete gamma function + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.2#E4 + + Examples + -------- + >>> import scipy.special as sc + + It starts at 0 and monotonically increases to infinity. + + >>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1]) + array([0. , 0.00789539, 0.22746821, inf]) + + It inverts the lower incomplete gamma function. + + >>> a, x = 0.5, [0, 0.1, 0.5, 1] + >>> sc.gammainc(a, sc.gammaincinv(a, x)) + array([0. , 0.1, 0.5, 1. ]) + + >>> a, x = 0.5, [0, 10, 25] + >>> sc.gammaincinv(a, sc.gammainc(a, x)) + array([ 0. , 10. , 25.00001465]) + + """) + +add_newdoc("gammasgn", + r""" + gammasgn(x, out=None) + + Sign of the gamma function. + + It is defined as + + .. math:: + + \text{gammasgn}(x) = + \begin{cases} + +1 & \Gamma(x) > 0 \\ + -1 & \Gamma(x) < 0 + \end{cases} + + where :math:`\Gamma` is the gamma function; see `gamma`. This + definition is complete since the gamma function is never zero; + see the discussion after [dlmf]_. 
+ + Parameters + ---------- + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Sign of the gamma function + + See Also + -------- + gamma : the gamma function + gammaln : log of the absolute value of the gamma function + loggamma : analytic continuation of the log of the gamma function + + Notes + ----- + The gamma function can be computed as ``gammasgn(x) * + np.exp(gammaln(x))``. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/5.2#E1 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is 1 for `x > 0`. + + >>> sc.gammasgn([1, 2, 3, 4]) + array([1., 1., 1., 1.]) + + It alternates between -1 and 1 for negative integers. + + >>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5]) + array([-1., 1., -1., 1.]) + + It can be used to compute the gamma function. + + >>> x = [1.5, 0.5, -0.5, -1.5] + >>> sc.gammasgn(x) * np.exp(sc.gammaln(x)) + array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ]) + >>> sc.gamma(x) + array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ]) + + """) + +add_newdoc("gdtr", + r""" + gdtr(a, b, x, out=None) + + Gamma distribution cumulative distribution function. + + Returns the integral from zero to `x` of the gamma probability density + function, + + .. math:: + + F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + The rate parameter of the gamma distribution, sometimes denoted + :math:`\beta` (float). It is also the reciprocal of the scale + parameter :math:`\theta`. + b : array_like + The shape parameter of the gamma distribution, sometimes denoted + :math:`\alpha` (float). + x : array_like + The quantile (upper limit of integration; float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + F : scalar or ndarray + The CDF of the gamma distribution with parameters `a` and `b` + evaluated at `x`. + + See Also + -------- + gdtrc : 1 - CDF of the gamma distribution. + scipy.stats.gamma: Gamma distribution + + Notes + ----- + The evaluation is carried out using the relation to the incomplete gamma + integral (regularized gamma function). + + Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can + improve performance compared to the ``cdf`` method of `scipy.stats.gamma` + (see last example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``a=1``, ``b=2`` at ``x=5``. + + >>> import numpy as np + >>> from scipy.special import gdtr + >>> import matplotlib.pyplot as plt + >>> gdtr(1., 2., 5.) + 0.9595723180054873 + + Compute the function for ``a=1`` and ``b=2`` at several points by + providing a NumPy array for `x`. + + >>> xvalues = np.array([1., 2., 3., 4]) + >>> gdtr(1., 1., xvalues) + array([0.63212056, 0.86466472, 0.95021293, 0.98168436]) + + `gdtr` can evaluate different parameter sets by providing arrays with + broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the + function for three different `a` at four positions `x` and ``b=3``, + resulting in a 3x4 array. 
+ + >>> a = np.array([[0.5], [1.5], [2.5]]) + >>> x = np.array([1., 2., 3., 4]) + >>> a.shape, x.shape + ((3, 1), (4,)) + + >>> gdtr(a, 3., x) + array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358], + [0.19115317, 0.57680992, 0.82642193, 0.9380312 ], + [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]]) + + Plot the function for four different parameter sets. + + >>> a_parameters = [0.3, 1, 2, 6] + >>> b_parameters = [2, 10, 15, 20] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... a, b, style = parameter_set + ... gdtr_vals = gdtr(a, b, x) + ... ax.plot(x, gdtr_vals, label=fr"$a= {a},\, b={b}$", ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("Gamma distribution cumulative distribution function") + >>> plt.show() + + The gamma distribution is also available as `scipy.stats.gamma`. Using + `gdtr` directly can be much faster than calling the ``cdf`` method of + `scipy.stats.gamma`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``. + + >>> from scipy.stats import gamma + >>> a = 2. + >>> b = 3 + >>> x = 1. + >>> gdtr_result = gdtr(a, b, x) # this will often be faster than below + >>> gamma_dist_result = gamma(b, scale=1/a).cdf(x) + >>> gdtr_result == gamma_dist_result # test that results are equal + True + """) + +add_newdoc("gdtrc", + r""" + gdtrc(a, b, x, out=None) + + Gamma distribution survival function. + + Integral from `x` to infinity of the gamma probability density function, + + .. math:: + + F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + The rate parameter of the gamma distribution, sometimes denoted + :math:`\beta` (float). It is also the reciprocal of the scale + parameter :math:`\theta`. + b : array_like + The shape parameter of the gamma distribution, sometimes denoted + :math:`\alpha` (float). + x : array_like + The quantile (lower limit of integration; float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + F : scalar or ndarray + The survival function of the gamma distribution with parameters `a` + and `b` evaluated at `x`. + + See Also + -------- + gdtr: Gamma distribution cumulative distribution function + scipy.stats.gamma: Gamma distribution + gdtrix + + Notes + ----- + The evaluation is carried out using the relation to the incomplete gamma + integral (regularized gamma function). + + Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can + improve performance compared to the ``sf`` method of `scipy.stats.gamma` + (see last example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``a=1`` and ``b=2`` at ``x=5``. + + >>> import numpy as np + >>> from scipy.special import gdtrc + >>> import matplotlib.pyplot as plt + >>> gdtrc(1., 2., 5.) + 0.04042768199451279 + + Compute the function for ``a=1``, ``b=2`` at several points by providing + a NumPy array for `x`. 
+ + >>> xvalues = np.array([1., 2., 3., 4]) + >>> gdtrc(1., 1., xvalues) + array([0.36787944, 0.13533528, 0.04978707, 0.01831564]) + + `gdtrc` can evaluate different parameter sets by providing arrays with + broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the + function for three different `a` at four positions `x` and ``b=3``, + resulting in a 3x4 array. + + >>> a = np.array([[0.5], [1.5], [2.5]]) + >>> x = np.array([1., 2., 3., 4]) + >>> a.shape, x.shape + ((3, 1), (4,)) + + >>> gdtrc(a, 3., x) + array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642], + [0.80884683, 0.42319008, 0.17357807, 0.0619688 ], + [0.54381312, 0.12465202, 0.02025672, 0.0027694 ]]) + + Plot the function for four different parameter sets. + + >>> a_parameters = [0.3, 1, 2, 6] + >>> b_parameters = [2, 10, 15, 20] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... a, b, style = parameter_set + ... gdtrc_vals = gdtrc(a, b, x) + ... ax.plot(x, gdtrc_vals, label=fr"$a= {a},\, b={b}$", ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("Gamma distribution survival function") + >>> plt.show() + + The gamma distribution is also available as `scipy.stats.gamma`. + Using `gdtrc` directly can be much faster than calling the ``sf`` method + of `scipy.stats.gamma`, especially for small arrays or individual + values. To get the same results one must use the following parametrization: + ``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``. + + >>> from scipy.stats import gamma + >>> a = 2 + >>> b = 3 + >>> x = 1. + >>> gdtrc_result = gdtrc(a, b, x) # this will often be faster than below + >>> gamma_dist_result = gamma(b, scale=1/a).sf(x) + >>> gdtrc_result == gamma_dist_result # test that results are equal + True + """) + +add_newdoc("gdtria", + """ + gdtria(p, b, x, out=None) + + Inverse of `gdtr` vs a. + + Returns the inverse with respect to the parameter `a` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. + + Parameters + ---------- + p : array_like + Probability values. + b : array_like + `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter + of the gamma distribution. + x : array_like + Nonnegative real values, from the domain of the gamma distribution. + out : ndarray, optional + If a fourth argument is given, it must be a numpy.ndarray whose size + matches the broadcast result of `a`, `b` and `x`. `out` is then the + array returned by the function. + + Returns + ------- + a : scalar or ndarray + Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a` + is the "scale" parameter of the gamma distribution. + + See Also + -------- + gdtr : CDF of the gamma distribution. + gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`. + gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `a` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `a`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. 
and Morris, A. H., + Computation of the incomplete gamma function ratios and their + inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. + + Examples + -------- + First evaluate `gdtr`. + + >>> from scipy.special import gdtr, gdtria + >>> p = gdtr(1.2, 3.4, 5.6) + >>> print(p) + 0.94378087442 + + Verify the inverse. + + >>> gdtria(p, 3.4, 5.6) + 1.2 + """) + +add_newdoc("gdtrib", + """ + gdtrib(a, p, x, out=None) + + Inverse of `gdtr` vs b. + + Returns the inverse with respect to the parameter `b` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. + + Parameters + ---------- + a : array_like + `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale" + parameter of the gamma distribution. + p : array_like + Probability values. + x : array_like + Nonnegative real values, from the domain of the gamma distribution. + out : ndarray, optional + If a fourth argument is given, it must be a numpy.ndarray whose size + matches the broadcast result of `a`, `b` and `x`. `out` is then the + array returned by the function. + + Returns + ------- + b : scalar or ndarray + Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is + the "shape" parameter of the gamma distribution. + + See Also + -------- + gdtr : CDF of the gamma distribution. + gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`. + gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `b` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `b`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. and Morris, A. H., + Computation of the incomplete gamma function ratios and their + inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. + + Examples + -------- + First evaluate `gdtr`. + + >>> from scipy.special import gdtr, gdtrib + >>> p = gdtr(1.2, 3.4, 5.6) + >>> print(p) + 0.94378087442 + + Verify the inverse. + + >>> gdtrib(1.2, p, 5.6) + 3.3999999999723882 + """) + +add_newdoc("gdtrix", + """ + gdtrix(a, b, p, out=None) + + Inverse of `gdtr` vs x. + + Returns the inverse with respect to the parameter `x` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. This is also known as the pth quantile of the + distribution. + + Parameters + ---------- + a : array_like + `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale" + parameter of the gamma distribution. + b : array_like + `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter + of the gamma distribution. + p : array_like + Probability values. + out : ndarray, optional + If a fourth argument is given, it must be a numpy.ndarray whose size + matches the broadcast result of `a`, `b` and `x`. `out` is then the + array returned by the function. + + Returns + ------- + x : scalar or ndarray + Values of the `x` parameter such that `p = gdtr(a, b, x)`. + + See Also + -------- + gdtr : CDF of the gamma distribution. + gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`. + gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. 
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `x` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `x`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtrix
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtrix(1.2, 3.4, p)
+    5.5999999999999996
+    """)
+
+add_newdoc("hankel1",
+    r"""
+    hankel1(v, z, out=None)
+
+    Hankel function of the first kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Hankel function of the first kind.
+
+    See Also
+    --------
+    hankel1e : this function with leading exponential behavior stripped off.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(1)}_v(z) =
+        \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
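+
+    Examples
+    --------
+    As a rough sanity check, for real argument `hankel1` agrees with the
+    standard decomposition into Bessel functions of the first and second
+    kind, :math:`H^{(1)}_v(z) = J_v(z) + \imath Y_v(z)`:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> v, z = 1, np.array([1.0, 2.0, 3.0])
+    >>> np.allclose(sc.hankel1(v, z), sc.jv(v, z) + 1j * sc.yv(v, z))
+    True
+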
+    """)
+
+add_newdoc("hankel1e",
+    r"""
+    hankel1e(v, z, out=None)
+
+    Exponentially scaled Hankel function of the first kind
+
+    Defined as::
+
+        hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponentially scaled Hankel function.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(1)}_v(z) =
+        \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    """)
+
+add_newdoc("hankel2",
+    r"""
+    hankel2(v, z, out=None)
+
+    Hankel function of the second kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Hankel function of the second kind.
+
+    See Also
+    --------
+    hankel2e : this function with leading exponential behavior stripped off.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(2)}_v(z) =
+        -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    """)
+
+add_newdoc("hankel2e",
+    r"""
+    hankel2e(v, z, out=None)
+
+    Exponentially scaled Hankel function of the second kind
+
+    Defined as::
+
+        hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponentially scaled Hankel function of the second kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi}
+        \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
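+
+    Examples
+    --------
+    A quick numerical check of the scaling stated above: `hankel2e`
+    matches `hankel2` multiplied by ``exp(1j * z)``:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> v, z = 1, np.array([1.0, 2.0, 3.0])
+    >>> np.allclose(sc.hankel2e(v, z), sc.hankel2(v, z) * np.exp(1j * z))
+    True
+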
+    """)
+
+add_newdoc("huber",
+    r"""
+    huber(delta, r, out=None)
+
+    Huber loss function.
+
+    .. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0  \\
+              \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\
+              \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
+
+    Parameters
+    ----------
+    delta : ndarray
+        Input array, indicating the quadratic vs. linear loss changepoint.
+    r : ndarray
+        Input array, possibly representing residuals.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        The computed Huber loss function values.
+
+    See Also
+    --------
+    pseudo_huber : smooth approximation of this function
+
+    Notes
+    -----
+    `huber` is useful as a loss function in robust statistics or machine
+    learning to reduce the influence of outliers as compared to the common
+    squared error loss; residuals with a magnitude higher than `delta` are
+    not squared [1]_.
+
+    Typically, `r` represents residuals, the difference
+    between a model prediction and data. Then, for :math:`|r|\leq\delta`,
+    `huber` resembles the squared error and for :math:`|r|>\delta` the
+    absolute error. This way, the Huber loss often achieves
+    a fast convergence in model fitting for small residuals like the squared
+    error loss function and still reduces the influence of outliers
+    (:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is
+    the cutoff between squared and absolute error regimes, it has
+    to be tuned carefully for each problem. `huber` is also
+    convex, making it suitable for gradient based optimization.
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Peter Huber. "Robust Estimation of a Location Parameter",
+           1964. Annals of Statistics. 53 (1): 73 - 101.
+
+    Examples
+    --------
+    Import all necessary modules.
+
+    >>> import numpy as np
+    >>> from scipy.special import huber
+    >>> import matplotlib.pyplot as plt
+
+    Compute the function for ``delta=1`` at ``r=2``
+
+    >>> huber(1., 2.)
+    1.5
+
+    Compute the function for different `delta` by providing a NumPy array or
+    list for `delta`.
+
+    >>> huber([1., 3., 5.], 4.)
+    array([3.5, 7.5, 8. ])
+
+    Compute the function at different points by providing a NumPy array or
+    list for `r`.
+
+    >>> huber(2., np.array([1., 1.5, 3.]))
+    array([0.5  , 1.125, 4.   ])
+
+    The function can be calculated for different `delta` and `r` by
+    providing arrays for both with compatible shapes for broadcasting.
+
+    >>> r = np.array([1., 2.5, 8., 10.])
+    >>> deltas = np.array([[1.], [5.], [9.]])
+    >>> print(r.shape, deltas.shape)
+    (4,) (3, 1)
+
+    >>> huber(deltas, r)
+    array([[ 0.5  ,  2.   ,  7.5  ,  9.5  ],
+           [ 0.5  ,  3.125, 27.5  , 37.5  ],
+           [ 0.5  ,  3.125, 32.   , 49.5  ]])
+
+    Plot the function for different `delta`.
+
+    >>> x = np.linspace(-4, 4, 500)
+    >>> deltas = [1, 2, 3]
+    >>> linestyles = ["dashed", "dotted", "dashdot"]
+    >>> fig, ax = plt.subplots()
+    >>> combined_plot_parameters = list(zip(deltas, linestyles))
+    >>> for delta, style in combined_plot_parameters:
+    ...     ax.plot(x, huber(delta, x), label=fr"$\delta={delta}$", ls=style)
+    >>> ax.legend(loc="upper center")
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title(r"Huber loss function $h_{\delta}(x)$")
+    >>> ax.set_xlim(-4, 4)
+    >>> ax.set_ylim(0, 8)
+    >>> plt.show()
+    """)
+
+add_newdoc("hyp0f1",
+    r"""
+    hyp0f1(v, z, out=None)
+
+    Confluent hypergeometric limit function 0F1.
+
+    Parameters
+    ----------
+    v : array_like
+        Real-valued parameter
+    z : array_like
+        Real- or complex-valued argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The confluent hypergeometric limit function
+
+    Notes
+    -----
+    This function is defined as:
+
+    .. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
+
+    It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
+    and satisfies the differential equation :math:`zf''(z) + vf'(z) =
+    f(z)`. See [1]_ for more information.
+
+    References
+    ----------
+    .. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function",
+           http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is one when `z` is zero.
+
+    >>> sc.hyp0f1(1, 0)
+    1.0
+
+    It is the limit of the confluent hypergeometric function as `q`
+    goes to infinity.
+
+    >>> q = np.array([1, 10, 100, 1000])
+    >>> v = 1
+    >>> z = 1
+    >>> sc.hyp1f1(q, v, z / q)
+    array([2.71828183, 2.31481985, 2.28303778, 2.27992985])
+    >>> sc.hyp0f1(v, z)
+    2.2795853023360673
+
+    It is related to Bessel functions.
+
+    >>> n = 1
+    >>> x = np.linspace(0, 1, 5)
+    >>> sc.jv(n, x)
+    array([0.        , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
+    >>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)
+    array([0.        , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
+
+    """)
+
+add_newdoc("hyp1f1",
+    r"""
+    hyp1f1(a, b, x, out=None)
+
+    Confluent hypergeometric function 1F1.
+
+    The confluent hypergeometric function is defined by the series
+
+    .. math::
+
+        {}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k.
+
+    See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the
+    Pochhammer symbol; see `poch`.
+ + Parameters + ---------- + a, b : array_like + Real parameters + x : array_like + Real or complex argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the confluent hypergeometric function + + See Also + -------- + hyperu : another confluent hypergeometric function + hyp0f1 : confluent hypergeometric limit function + hyp2f1 : Gaussian hypergeometric function + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/13.2#E2 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is one when `x` is zero: + + >>> sc.hyp1f1(0.5, 0.5, 0) + 1.0 + + It is singular when `b` is a nonpositive integer. + + >>> sc.hyp1f1(0.5, -1, 0) + inf + + It is a polynomial when `a` is a nonpositive integer. + + >>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0]) + >>> sc.hyp1f1(a, b, x) + array([-1., -3., -5., -7.]) + >>> 1 + (a / b) * x + array([-1., -3., -5., -7.]) + + It reduces to the exponential function when `a = b`. + + >>> sc.hyp1f1(2, 2, [1, 2, 3, 4]) + array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003]) + >>> np.exp([1, 2, 3, 4]) + array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003]) + + """) + +add_newdoc("hyperu", + r""" + hyperu(a, b, x, out=None) + + Confluent hypergeometric function U + + It is defined as the solution to the equation + + .. math:: + + x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0 + + which satisfies the property + + .. math:: + + U(a, b, x) \sim x^{-a} + + as :math:`x \to \infty`. See [dlmf]_ for more details. + + Parameters + ---------- + a, b : array_like + Real-valued parameters + x : array_like + Real-valued argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of `U` + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematics Functions + https://dlmf.nist.gov/13.2#E6 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It has a branch cut along the negative `x` axis. + + >>> x = np.linspace(-0.1, -10, 5) + >>> sc.hyperu(1, 1, x) + array([nan, nan, nan, nan, nan]) + + It approaches zero as `x` goes to infinity. + + >>> x = np.array([1, 10, 100]) + >>> sc.hyperu(1, 1, x) + array([0.59634736, 0.09156333, 0.00990194]) + + It satisfies Kummer's transformation. + + >>> a, b, x = 2, 1, 1 + >>> sc.hyperu(a, b, x) + 0.1926947246463881 + >>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x) + 0.1926947246463881 + + """) + +add_newdoc("i0", + r""" + i0(x, out=None) + + Modified Bessel function of order 0. + + Defined as, + + .. math:: + I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x), + + where :math:`J_0` is the Bessel function of the first kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the modified Bessel function of order 0 at `x`. + + See Also + -------- + iv: Modified Bessel function of any order + i0e: Exponentially scaled modified Bessel function of order 0 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `i0`. + + References + ---------- + .. 
[1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import i0 + >>> i0(1.) + 1.2660658777520082 + + Calculate at several points: + + >>> import numpy as np + >>> i0(np.array([-2., 0., 3.5])) + array([2.2795853 , 1. , 7.37820343]) + + Plot the function from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> y = i0(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("i0e", + """ + i0e(x, out=None) + + Exponentially scaled modified Bessel function of order 0. + + Defined as:: + + i0e(x) = exp(-abs(x)) * i0(x). + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the exponentially scaled modified Bessel function of order 0 + at `x`. + + See Also + -------- + iv: Modified Bessel function of the first kind + i0: Modified Bessel function of order 0 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. The + polynomial expansions used are the same as those in `i0`, but + they are not multiplied by the dominant exponential factor. + + This function is a wrapper for the Cephes [1]_ routine `i0e`. `i0e` + is useful for large arguments `x`: for these, `i0` quickly overflows. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `i0` returns infinity whereas `i0e` still returns + a finite number. + + >>> from scipy.special import i0, i0e + >>> i0(1000.), i0e(1000.) + (inf, 0.012617240455891257) + + Calculate the function at several points by providing a NumPy array or + list for `x`: + + >>> import numpy as np + >>> i0e(np.array([-2., 0., 3.])) + array([0.30850832, 1. , 0.24300035]) + + Plot the function from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> y = i0e(x) + >>> ax.plot(x, y) + >>> plt.show() + """) + +add_newdoc("i1", + r""" + i1(x, out=None) + + Modified Bessel function of order 1. + + Defined as, + + .. math:: + I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!} + = -\imath J_1(\imath x), + + where :math:`J_1` is the Bessel function of the first kind of order 1. + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the modified Bessel function of order 1 at `x`. + + See Also + -------- + iv: Modified Bessel function of the first kind + i1e: Exponentially scaled modified Bessel function of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `i1`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import i1 + >>> i1(1.) + 0.5651591039924851 + + Calculate the function at several points: + + >>> import numpy as np + >>> i1(np.array([-2., 0., 6.])) + array([-1.59063685, 0. 
, 61.34193678])
+
+ Plot the function between -10 and 10.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(-10., 10., 1000)
+ >>> y = i1(x)
+ >>> ax.plot(x, y)
+ >>> plt.show()
+
+ """)
+
+add_newdoc("i1e",
+ """
+ i1e(x, out=None)
+
+ Exponentially scaled modified Bessel function of order 1.
+
+ Defined as::
+
+ i1e(x) = exp(-abs(x)) * i1(x)
+
+ Parameters
+ ----------
+ x : array_like
+ Argument (float)
+ out : ndarray, optional
+ Optional output array for the function values
+
+ Returns
+ -------
+ I : scalar or ndarray
+ Value of the exponentially scaled modified Bessel function of order 1
+ at `x`.
+
+ See Also
+ --------
+ iv: Modified Bessel function of the first kind
+ i1: Modified Bessel function of order 1
+
+ Notes
+ -----
+ The range is partitioned into the two intervals [0, 8] and (8, infinity).
+ Chebyshev polynomial expansions are employed in each interval. The
+ polynomial expansions used are the same as those in `i1`, but
+ they are not multiplied by the dominant exponential factor.
+
+ This function is a wrapper for the Cephes [1]_ routine `i1e`. `i1e`
+ is useful for large arguments `x`: for these, `i1` quickly overflows.
+
+ References
+ ----------
+ .. [1] Cephes Mathematical Functions Library,
+ http://www.netlib.org/cephes/
+
+ Examples
+ --------
+ In the following example `i1` returns infinity whereas `i1e` still returns
+ a finite number.
+
+ >>> from scipy.special import i1, i1e
+ >>> i1(1000.), i1e(1000.)
+ (inf, 0.01261093025692863)
+
+ Calculate the function at several points by providing a NumPy array or
+ list for `x`:
+
+ >>> import numpy as np
+ >>> i1e(np.array([-2., 0., 6.]))
+ array([-0.21526929, 0. , 0.15205146])
+
+ Plot the function between -10 and 10.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(-10., 10., 1000)
+ >>> y = i1e(x)
+ >>> ax.plot(x, y)
+ >>> plt.show()
+ """)
+
+add_newdoc("_igam_fac",
+ """
+ Internal function, do not use.
+ """)
+
+add_newdoc("iv",
+ r"""
+ iv(v, z, out=None)
+
+ Modified Bessel function of the first kind of real order.
+
+ Parameters
+ ----------
+ v : array_like
+ Order. If `z` is of real type and negative, `v` must be integer
+ valued.
+ z : array_like of float or complex
+ Argument.
+ out : ndarray, optional
+ Optional output array for the function values
+
+ Returns
+ -------
+ scalar or ndarray
+ Values of the modified Bessel function.
+
+ See Also
+ --------
+ ive : This function with leading exponential behavior stripped off.
+ i0 : Faster version of this function for order 0.
+ i1 : Faster version of this function for order 1.
+
+ Notes
+ -----
+ For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
+ using Temme's method [1]_. For larger orders, uniform asymptotic
+ expansions are applied.
+
+ For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
+ called. It uses a power series for small `z`, the asymptotic expansion
+ for large `abs(z)`, the Miller algorithm normalized by the Wronskian
+ and a Neumann series for intermediate magnitudes, and the uniform
+ asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
+ orders. Backward recurrence is used to generate sequences or reduce
+ orders when necessary.
+
+ The calculations above are done in the right half plane and continued
+ into the left half plane by the formula,
+
+ .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
+
+ (valid when the real part of `z` is positive). For negative `v`, the
+ formula
+
+ ..
math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
+
+ is used, where :math:`K_v(z)` is the modified Bessel function of the
+ second kind, evaluated using the AMOS routine `zbesk`.
+
+ References
+ ----------
+ .. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
+ .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+ of a Complex Argument and Nonnegative Order",
+ http://netlib.org/amos/
+
+ Examples
+ --------
+ Evaluate the function of order 0 at one point.
+
+ >>> from scipy.special import iv
+ >>> iv(0, 1.)
+ 1.2660658777520084
+
+ Evaluate the function at one point for different orders.
+
+ >>> iv(0, 1.), iv(1, 1.), iv(1.5, 1.)
+ (1.2660658777520084, 0.565159103992485, 0.2935253263474798)
+
+ The evaluation for different orders can be carried out in one call by
+ providing a list or NumPy array as argument for the `v` parameter:
+
+ >>> iv([0, 1, 1.5], 1.)
+ array([1.26606588, 0.5651591 , 0.29352533])
+
+ Evaluate the function at several points for order 0 by providing an
+ array for `z`.
+
+ >>> import numpy as np
+ >>> points = np.array([-2., 0., 3.])
+ >>> iv(0, points)
+ array([2.2795853 , 1. , 4.88079259])
+
+ If `z` is an array, the order parameter `v` must be broadcastable to
+ the correct shape if different orders shall be computed in one call.
+ To calculate the orders 0 and 1 for an 1D array:
+
+ >>> orders = np.array([[0], [1]])
+ >>> orders.shape
+ (2, 1)
+
+ >>> iv(orders, points)
+ array([[ 2.2795853 , 1. , 4.88079259],
+ [-1.59063685, 0. , 3.95337022]])
+
+ Plot the functions of order 0 to 3 from -5 to 5.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(-5., 5., 1000)
+ >>> for i in range(4):
+ ... ax.plot(x, iv(i, x), label=f'$I_{i!r}$')
+ >>> ax.legend()
+ >>> plt.show()
+
+ """)
+
+add_newdoc("ive",
+ r"""
+ ive(v, z, out=None)
+
+ Exponentially scaled modified Bessel function of the first kind.
+
+ Defined as::
+
+ ive(v, z) = iv(v, z) * exp(-abs(z.real))
+
+ For imaginary numbers without a real part, returns the unscaled
+ Bessel function of the first kind `iv`.
+
+ Parameters
+ ----------
+ v : array_like of float
+ Order.
+ z : array_like of float or complex
+ Argument.
+ out : ndarray, optional
+ Optional output array for the function values
+
+ Returns
+ -------
+ scalar or ndarray
+ Values of the exponentially scaled modified Bessel function.
+
+ See Also
+ --------
+ iv: Modified Bessel function of the first kind
+ i0e: Faster implementation of this function for order 0
+ i1e: Faster implementation of this function for order 1
+
+ Notes
+ -----
+ For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
+ power series for small `z`, the asymptotic expansion for large
+ `abs(z)`, the Miller algorithm normalized by the Wronskian and a
+ Neumann series for intermediate magnitudes, and the uniform asymptotic
+ expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
+ Backward recurrence is used to generate sequences or reduce orders when
+ necessary.
+
+ The calculations above are done in the right half plane and continued
+ into the left half plane by the formula,
+
+ .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
+
+ (valid when the real part of `z` is positive). For negative `v`, the
+ formula
+
+ .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
+
+ is used, where :math:`K_v(z)` is the modified Bessel function of the
+ second kind, evaluated using the AMOS routine `zbesk`.
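+
+ The negative-order relation above can be sanity-checked numerically.
+ The following is an illustrative sketch, not part of the reference
+ text; it assumes only NumPy and the `iv` and `kv` functions documented
+ in this module:
+
+ >>> import numpy as np
+ >>> from scipy.special import iv, kv
+ >>> v, z = 0.5, 1.0
+ >>> lhs = iv(-v, z)
+ >>> # right-hand side of the relation for negative orders
+ >>> rhs = iv(v, z) + 2 / np.pi * np.sin(np.pi * v) * kv(v, z)
+ >>> np.allclose(lhs, rhs)
+ True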
+
+ `ive` is useful for large arguments `z`: for these, `iv` easily overflows,
+ while `ive` does not due to the exponential scaling.
+
+ References
+ ----------
+ .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+ of a Complex Argument and Nonnegative Order",
+ http://netlib.org/amos/
+
+ Examples
+ --------
+ In the following example `iv` returns infinity whereas `ive` still returns
+ a finite number.
+
+ >>> from scipy.special import iv, ive
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> iv(3, 1000.), ive(3, 1000.)
+ (inf, 0.01256056218254712)
+
+ Evaluate the function at one point for different orders by
+ providing a list or NumPy array as argument for the `v` parameter:
+
+ >>> ive([0, 1, 1.5], 1.)
+ array([0.46575961, 0.20791042, 0.10798193])
+
+ Evaluate the function at several points for order 0 by providing an
+ array for `z`.
+
+ >>> points = np.array([-2., 0., 3.])
+ >>> ive(0, points)
+ array([0.30850832, 1. , 0.24300035])
+
+ Evaluate the function at several points for different orders by
+ providing arrays for both `v` and `z`. Both arrays have to be
+ broadcastable to the correct shape. To calculate the orders 0, 1
+ and 2 for a 1D array of points:
+
+ >>> ive([[0], [1], [2]], points)
+ array([[ 0.30850832, 1. , 0.24300035],
+ [-0.21526929, 0. , 0.19682671],
+ [ 0.09323903, 0. , 0.11178255]])
+
+ Plot the functions of order 0 to 3 from -5 to 5.
+
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(-5., 5., 1000)
+ >>> for i in range(4):
+ ... ax.plot(x, ive(i, x), label=fr'$I_{i!r}(z)\cdot e^{{-|z|}}$')
+ >>> ax.legend()
+ >>> ax.set_xlabel(r"$z$")
+ >>> plt.show()
+ """)
+
+add_newdoc("j0",
+ r"""
+ j0(x, out=None)
+
+ Bessel function of the first kind of order 0.
+
+ Parameters
+ ----------
+ x : array_like
+ Argument (float).
+ out : ndarray, optional
+ Optional output array for the function values
+
+ Returns
+ -------
+ J : scalar or ndarray
+ Value of the Bessel function of the first kind of order 0 at `x`.
+
+ See Also
+ --------
+ jv : Bessel function of real order and complex argument.
+ spherical_jn : spherical Bessel functions.
+
+ Notes
+ -----
+ The domain is divided into the intervals [0, 5] and (5, infinity). In the
+ first interval the following rational approximation is used:
+
+ .. math::
+
+ J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
+
+ where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
+ :math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
+ and 8, respectively.
+
+ In the second interval, the Hankel asymptotic expansion is employed with
+ two rational functions of degree 6/6 and 7/7.
+
+ This function is a wrapper for the Cephes [1]_ routine `j0`.
+ It should not be confused with the spherical Bessel functions (see
+ `spherical_jn`).
+
+ References
+ ----------
+ .. [1] Cephes Mathematical Functions Library,
+ http://www.netlib.org/cephes/
+
+ Examples
+ --------
+ Calculate the function at one point:
+
+ >>> from scipy.special import j0
+ >>> j0(1.)
+ 0.7651976865579665
+
+ Calculate the function at several points:
+
+ >>> import numpy as np
+ >>> j0(np.array([-2., 0., 4.]))
+ array([ 0.22389078, 1. , -0.39714981])
+
+ Plot the function from -20 to 20.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(-20., 20., 1000)
+ >>> y = j0(x)
+ >>> ax.plot(x, y)
+ >>> plt.show()
+
+ """)
+
+add_newdoc("j1",
+ """
+ j1(x, out=None)
+
+ Bessel function of the first kind of order 1.
+ + Parameters + ---------- + x : array_like + Argument (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the Bessel function of the first kind of order 1 at `x`. + + See Also + -------- + jv: Bessel function of the first kind + spherical_jn: spherical Bessel functions. + + Notes + ----- + The domain is divided into the intervals [0, 8] and (8, infinity). In the + first interval a 24 term Chebyshev expansion is used. In the second, the + asymptotic trigonometric representation is employed using two rational + functions of degree 5/5. + + This function is a wrapper for the Cephes [1]_ routine `j1`. + It should not be confused with the spherical Bessel functions (see + `spherical_jn`). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import j1 + >>> j1(1.) + 0.44005058574493355 + + Calculate the function at several points: + + >>> import numpy as np + >>> j1(np.array([-2., 0., 4.])) + array([-0.57672481, 0. , -0.06604333]) + + Plot the function from -20 to 20. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-20., 20., 1000) + >>> y = j1(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("jn", + """ + jn(n, x, out=None) + + Bessel function of the first kind of integer order and real argument. + + Parameters + ---------- + n : array_like + order of the Bessel function + x : array_like + argument of the Bessel function + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The value of the bessel function + + See Also + -------- + jv + spherical_jn : spherical Bessel functions. + + Notes + ----- + `jn` is an alias of `jv`. + Not to be confused with the spherical Bessel functions (see + `spherical_jn`). + + """) + +add_newdoc("jv", + r""" + jv(v, z, out=None) + + Bessel function of the first kind of real order and complex argument. + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the Bessel function, :math:`J_v(z)`. + + See Also + -------- + jve : :math:`J_v` with leading exponential behavior stripped off. + spherical_jn : spherical Bessel functions. + j0 : faster version of this function for order 0. + j1 : faster version of this function for order 1. + + Notes + ----- + For positive `v` values, the computation is carried out using the AMOS + [1]_ `zbesj` routine, which exploits the connection to the modified + Bessel function :math:`I_v`, + + .. math:: + J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) + + J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) + + For negative `v` values the formula, + + .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) + + is used, where :math:`Y_v(z)` is the Bessel function of the second + kind, computed using the AMOS routine `zbesy`. Note that the second + term is exactly zero for integer `v`; to improve accuracy the second + term is explicitly omitted for `v` values such that `v = floor(v)`. + + Not to be confused with the spherical Bessel functions (see `spherical_jn`). + + References + ---------- + .. [1] Donald E. 
Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Evaluate the function of order 0 at one point. + + >>> from scipy.special import jv + >>> jv(0, 1.) + 0.7651976865579666 + + Evaluate the function at one point for different orders. + + >>> jv(0, 1.), jv(1, 1.), jv(1.5, 1.) + (0.7651976865579666, 0.44005058574493355, 0.24029783912342725) + + The evaluation for different orders can be carried out in one call by + providing a list or NumPy array as argument for the `v` parameter: + + >>> jv([0, 1, 1.5], 1.) + array([0.76519769, 0.44005059, 0.24029784]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> import numpy as np + >>> points = np.array([-2., 0., 3.]) + >>> jv(0, points) + array([ 0.22389078, 1. , -0.26005195]) + + If `z` is an array, the order parameter `v` must be broadcastable to + the correct shape if different orders shall be computed in one call. + To calculate the orders 0 and 1 for an 1D array: + + >>> orders = np.array([[0], [1]]) + >>> orders.shape + (2, 1) + + >>> jv(orders, points) + array([[ 0.22389078, 1. , -0.26005195], + [-0.57672481, 0. , 0.33905896]]) + + Plot the functions of order 0 to 3 from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> for i in range(4): + ... ax.plot(x, jv(i, x), label=f'$J_{i!r}$') + >>> ax.legend() + >>> plt.show() + + """) + +add_newdoc("jve", + r""" + jve(v, z, out=None) + + Exponentially scaled Bessel function of the first kind of order `v`. + + Defined as:: + + jve(v, z) = jv(v, z) * exp(-abs(z.imag)) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the exponentially scaled Bessel function. + + See Also + -------- + jv: Unscaled Bessel function of the first kind + + Notes + ----- + For positive `v` values, the computation is carried out using the AMOS + [1]_ `zbesj` routine, which exploits the connection to the modified + Bessel function :math:`I_v`, + + .. math:: + J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) + + J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) + + For negative `v` values the formula, + + .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) + + is used, where :math:`Y_v(z)` is the Bessel function of the second + kind, computed using the AMOS routine `zbesy`. Note that the second + term is exactly zero for integer `v`; to improve accuracy the second + term is explicitly omitted for `v` values such that `v = floor(v)`. + + Exponentially scaled Bessel functions are useful for large arguments `z`: + for these, the unscaled Bessel functions can easily under-or overflow. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Compare the output of `jv` and `jve` for large complex arguments for `z` + by computing their values for order ``v=1`` at ``z=1000j``. 
We see that + `jv` overflows but `jve` returns a finite number: + + >>> import numpy as np + >>> from scipy.special import jv, jve + >>> v = 1 + >>> z = 1000j + >>> jv(v, z), jve(v, z) + ((inf+infj), (7.721967686709077e-19+0.012610930256928629j)) + + For real arguments for `z`, `jve` returns the same as `jv`. + + >>> v, z = 1, 1000 + >>> jv(v, z), jve(v, z) + (0.004728311907089523, 0.004728311907089523) + + The function can be evaluated for several orders at the same time by + providing a list or NumPy array for `v`: + + >>> jve([1, 3, 5], 1j) + array([1.27304208e-17+2.07910415e-01j, -4.99352086e-19-8.15530777e-03j, + 6.11480940e-21+9.98657141e-05j]) + + In the same way, the function can be evaluated at several points in one + call by providing a list or NumPy array for `z`: + + >>> jve(1, np.array([1j, 2j, 3j])) + array([1.27308412e-17+0.20791042j, 1.31814423e-17+0.21526929j, + 1.20521602e-17+0.19682671j]) + + It is also possible to evaluate several orders at several points + at the same time by providing arrays for `v` and `z` with + compatible shapes for broadcasting. Compute `jve` for two different orders + `v` and three points `z` resulting in a 2x3 array. + + >>> v = np.array([[1], [3]]) + >>> z = np.array([1j, 2j, 3j]) + >>> v.shape, z.shape + ((2, 1), (3,)) + + >>> jve(v, z) + array([[1.27304208e-17+0.20791042j, 1.31810070e-17+0.21526929j, + 1.20517622e-17+0.19682671j], + [-4.99352086e-19-0.00815531j, -1.76289571e-18-0.02879122j, + -2.92578784e-18-0.04778332j]]) + """) + +add_newdoc("k0", + r""" + k0(x, out=None) + + Modified Bessel function of the second kind of order 0, :math:`K_0`. + + This function is also sometimes referred to as the modified Bessel + function of the third kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the modified Bessel function :math:`K_0` at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k0e: Exponentially scaled modified Bessel function of the second kind + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k0`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import k0 + >>> k0(1.) + 0.42102443824070823 + + Calculate the function at several points: + + >>> import numpy as np + >>> k0(np.array([0.5, 2., 3.])) + array([0.92441907, 0.11389387, 0.0347395 ]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k0(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("k0e", + """ + k0e(x, out=None) + + Exponentially scaled modified Bessel function K of order 0 + + Defined as:: + + k0e(x) = exp(x) * k0(x). + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the exponentially scaled modified Bessel function K of order + 0 at `x`. 
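+
+ The scaling relation in the definition above can be verified directly.
+ This is an illustrative sketch rather than part of the reference text;
+ it assumes only NumPy and the `k0` and `k0e` documented here:
+
+ >>> import numpy as np
+ >>> from scipy.special import k0, k0e
+ >>> x = 2.0
+ >>> # by definition, k0e(x) equals exp(x) * k0(x)
+ >>> np.allclose(k0e(x), np.exp(x) * k0(x))
+ True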
+ + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k0: Modified Bessel function of the second kind + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k0e`. `k0e` is + useful for large arguments: for these, `k0` easily underflows. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `k0` returns 0 whereas `k0e` still returns a + useful finite number: + + >>> from scipy.special import k0, k0e + >>> k0(1000.), k0e(1000) + (0., 0.03962832160075422) + + Calculate the function at several points by providing a NumPy array or + list for `x`: + + >>> import numpy as np + >>> k0e(np.array([0.5, 2., 3.])) + array([1.52410939, 0.84156822, 0.6977616 ]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k0e(x) + >>> ax.plot(x, y) + >>> plt.show() + """) + +add_newdoc("k1", + """ + k1(x, out=None) + + Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the modified Bessel function K of order 1 at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k1e: Exponentially scaled modified Bessel function K of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k1`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import k1 + >>> k1(1.) + 0.6019072301972346 + + Calculate the function at several points: + + >>> import numpy as np + >>> k1(np.array([0.5, 2., 3.])) + array([1.65644112, 0.13986588, 0.04015643]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k1(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("k1e", + """ + k1e(x, out=None) + + Exponentially scaled modified Bessel function K of order 1 + + Defined as:: + + k1e(x) = exp(x) * k1(x) + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the exponentially scaled modified Bessel function K of order + 1 at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k1: Modified Bessel function of the second kind of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k1e`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `k1` returns 0 whereas `k1e` still returns a + useful floating point number. 
+
+ >>> from scipy.special import k1, k1e
+ >>> k1(1000.), k1e(1000.)
+ (0., 0.03964813081296021)
+
+ Calculate the function at several points by providing a NumPy array or
+ list for `x`:
+
+ >>> import numpy as np
+ >>> k1e(np.array([0.5, 2., 3.]))
+ array([2.73100971, 1.03347685, 0.80656348])
+
+ Plot the function from 0 to 10.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(0., 10., 1000)
+ >>> y = k1e(x)
+ >>> ax.plot(x, y)
+ >>> plt.show()
+ """)
+
+add_newdoc("kelvin",
+ """
+ kelvin(x, out=None)
+
+ Kelvin functions as complex numbers
+
+ Parameters
+ ----------
+ x : array_like
+ Argument
+ out : tuple of ndarray, optional
+ Optional output arrays for the function values
+
+ Returns
+ -------
+ Be, Ke, Bep, Kep : 4-tuple of scalar or ndarray
+ The tuple (Be, Ke, Bep, Kep) contains complex numbers
+ representing the real and imaginary Kelvin functions and their
+ derivatives evaluated at `x`. For example, kelvin(x)[0].real =
+ ber x and kelvin(x)[0].imag = bei x with similar relationships
+ for ker and kei.
+ """)
+
+add_newdoc("ker",
+ r"""
+ ker(x, out=None)
+
+ Kelvin function ker.
+
+ Defined as
+
+ .. math::
+
+ \mathrm{ker}(x) = \Re[K_0(x e^{\pi i / 4})]
+
+ where :math:`K_0` is the modified Bessel function of the second
+ kind (see `kv`). See [dlmf]_ for more details.
+
+ Parameters
+ ----------
+ x : array_like
+ Real argument.
+ out : ndarray, optional
+ Optional output array for the function results.
+
+ Returns
+ -------
+ scalar or ndarray
+ Values of the Kelvin function.
+
+ See Also
+ --------
+ kei : the corresponding imaginary part
+ kerp : the derivative of ker
+ kv : modified Bessel function of the second kind
+
+ References
+ ----------
+ .. [dlmf] NIST, Digital Library of Mathematical Functions,
+ https://dlmf.nist.gov/10.61
+
+ Examples
+ --------
+ It can be expressed using the modified Bessel function of the
+ second kind.
+
+ >>> import numpy as np
+ >>> import scipy.special as sc
+ >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+ >>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real
+ array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
+ >>> sc.ker(x)
+ array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
+
+ """)
+
+add_newdoc("kerp",
+ r"""
+ kerp(x, out=None)
+
+ Derivative of the Kelvin function ker.
+
+ Parameters
+ ----------
+ x : array_like
+ Real argument.
+ out : ndarray, optional
+ Optional output array for the function results.
+
+ Returns
+ -------
+ scalar or ndarray
+ Values of the derivative of ker.
+
+ See Also
+ --------
+ ker
+
+ References
+ ----------
+ .. [dlmf] NIST, Digital Library of Mathematical Functions,
+ https://dlmf.nist.gov/10#PT5
+
+ """)
+
+add_newdoc("kl_div",
+ r"""
+ kl_div(x, y, out=None)
+
+ Elementwise function for computing Kullback-Leibler divergence.
+
+ .. math::
+
+ \mathrm{kl\_div}(x, y) =
+ \begin{cases}
+ x \log(x / y) - x + y & x > 0, y > 0 \\
+ y & x = 0, y \ge 0 \\
+ \infty & \text{otherwise}
+ \end{cases}
+
+ Parameters
+ ----------
+ x, y : array_like
+ Real arguments
+ out : ndarray, optional
+ Optional output array for the function results
+
+ Returns
+ -------
+ scalar or ndarray
+ Values of the Kullback-Leibler divergence.
+
+ See Also
+ --------
+ entr, rel_entr, scipy.stats.entropy
+
+ Notes
+ -----
+ .. versionadded:: 0.15.0
+
+ This function is non-negative and is jointly convex in `x` and `y`.
+
+ The origin of this function is in convex programming; see [1]_ for
+ details.
This is why the function contains the extra :math:`-x +
+ y` terms over what might be expected from the Kullback-Leibler
+ divergence. For a version of the function without the extra terms,
+ see `rel_entr`.
+
+ References
+ ----------
+ .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
+ Cambridge University Press, 2004.
+ :doi:`10.1017/CBO9780511804441`
+
+ """)
+
+add_newdoc("kn",
+ r"""
+ kn(n, x, out=None)
+
+ Modified Bessel function of the second kind of integer order `n`
+
+ Returns the modified Bessel function of the second kind for integer order
+ `n` at real `x`.
+
+ These are also sometimes called functions of the third kind, Basset
+ functions, or Macdonald functions.
+
+ Parameters
+ ----------
+ n : array_like of int
+ Order of Bessel functions (floats will truncate with a warning)
+ x : array_like of float
+ Argument at which to evaluate the Bessel functions
+ out : ndarray, optional
+ Optional output array for the function results.
+
+ Returns
+ -------
+ scalar or ndarray
+ Value of the modified Bessel function of the second kind,
+ :math:`K_n(x)`.
+
+ See Also
+ --------
+ kv : Same function, but accepts real order and complex argument
+ kvp : Derivative of this function
+
+ Notes
+ -----
+ Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
+ algorithm used, see [2]_ and the references therein.
+
+ References
+ ----------
+ .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+ of a Complex Argument and Nonnegative Order",
+ http://netlib.org/amos/
+ .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
+ functions of a complex argument and nonnegative order", ACM
+ TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
+
+ Examples
+ --------
+ Plot the function of several orders for real input:
+
+ >>> import numpy as np
+ >>> from scipy.special import kn
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.linspace(0, 5, 1000)
+ >>> for N in range(6):
+ ... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
+ >>> plt.ylim(0, 10)
+ >>> plt.legend()
+ >>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
+ >>> plt.show()
+
+ Calculate for a single value at multiple orders:
+
+ >>> kn([4, 5, 6], 1)
+ array([ 44.23241585, 360.9605896 , 3653.83831186])
+ """)
+
+add_newdoc("kolmogi",
+ """
+ kolmogi(p, out=None)
+
+ Inverse Survival Function of Kolmogorov distribution
+
+ It is the inverse function to `kolmogorov`.
+ Returns y such that ``kolmogorov(y) == p``.
+
+ Parameters
+ ----------
+ p : float array_like
+ Probability
+ out : ndarray, optional
+ Optional output array for the function results
+
+ Returns
+ -------
+ scalar or ndarray
+ The value(s) of kolmogi(p)
+
+ See Also
+ --------
+ kolmogorov : The Survival Function for the distribution
+ scipy.stats.kstwobign : Provides the functionality as a continuous distribution
+ smirnov, smirnovi : Functions for the one-sided distribution
+
+ Notes
+ -----
+ `kolmogorov` is used by `stats.kstest` in the application of the
+ Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+ function is exposed in `scipy.special`, but the recommended way to achieve
+ the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+ `stats.kstwobign` distribution.
+
+ Examples
+ --------
+ >>> from scipy.special import kolmogi
+ >>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
+ array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,
+ 0.57117327, 0.
])
+
+ """)
+
+add_newdoc("kolmogorov",
+ r"""
+ kolmogorov(y, out=None)
+
+ Complementary cumulative distribution (Survival Function) function of
+ Kolmogorov distribution.
+
+ Returns the complementary cumulative distribution function of
+ Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
+ of a two-sided test for equality between an empirical and a theoretical
+ distribution. It is equal to the (limit as n->infinity of the)
+ probability that ``sqrt(n) * max absolute deviation > y``.
+
+ Parameters
+ ----------
+ y : float array_like
+ Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
+ multiplied by sqrt(n).
+ out : ndarray, optional
+ Optional output array for the function results
+
+ Returns
+ -------
+ scalar or ndarray
+ The value(s) of kolmogorov(y)
+
+ See Also
+ --------
+ kolmogi : The Inverse Survival Function for the distribution
+ scipy.stats.kstwobign : Provides the functionality as a continuous distribution
+ smirnov, smirnovi : Functions for the one-sided distribution
+
+ Notes
+ -----
+ `kolmogorov` is used by `stats.kstest` in the application of the
+ Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+ function is exposed in `scipy.special`, but the recommended way to achieve
+ the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+ `stats.kstwobign` distribution.
+
+ Examples
+ --------
+ Show the probability of a gap at least as big as 0, 0.5 and 1.0.
+
+ >>> import numpy as np
+ >>> from scipy.special import kolmogorov
+ >>> from scipy.stats import kstwobign
+ >>> kolmogorov([0, 0.5, 1.0])
+ array([ 1. , 0.96394524, 0.26999967])
+
+ Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
+ the target distribution, a Normal(0, 1) distribution.
+
+ >>> from scipy.stats import norm, laplace
+ >>> rng = np.random.default_rng()
+ >>> n = 1000
+ >>> lap01 = laplace(0, 1)
+ >>> x = np.sort(lap01.rvs(n, random_state=rng))
+ >>> np.mean(x), np.std(x)
+ (-0.05841730131499543, 1.3968109101997568)
+
+ Construct the Empirical CDF and the K-S statistic Dn.
+
+ >>> target = norm(0,1) # Normal mean 0, stddev 1
+ >>> cdfs = target.cdf(x)
+ >>> ecdfs = np.arange(n+1, dtype=float)/n
+ >>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
+ >>> Dn = np.max(gaps)
+ >>> Kn = np.sqrt(n) * Dn
+ >>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
+ Dn=0.043363, sqrt(n)*Dn=1.371265
+ >>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
+ ... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' %
+ ... (Kn, kolmogorov(Kn)),
+ ... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' %
+ ... (Kn, kstwobign.cdf(Kn))]))
+ For a sample of size n drawn from a N(0, 1) distribution:
+ the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533
+ the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467
+
+ Plot the Empirical CDF against the target N(0, 1) CDF.
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
+ >>> x3 = np.linspace(-3, 3, 100)
+ >>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
+ >>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
+ >>> # Add vertical lines marking Dn+ and Dn-
+ >>> iminus, iplus = np.argmax(gaps, axis=0)
+ >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus],
+ ... color='r', linestyle='dashed', lw=4)
+ >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1],
+ ...
color='r', linestyle='dashed', lw=4) + >>> plt.show() + """) + +add_newdoc("_kolmogc", + r""" + Internal function, do not use. + """) + +add_newdoc("_kolmogci", + r""" + Internal function, do not use. + """) + +add_newdoc("_kolmogp", + r""" + Internal function, do not use. + """) + +add_newdoc("kv", + r""" + kv(v, z, out=None) + + Modified Bessel function of the second kind of real order `v` + + Returns the modified Bessel function of the second kind for real order + `v` at complex `z`. + + These are also sometimes called functions of the third kind, Basset + functions, or Macdonald functions. They are defined as those solutions + of the modified Bessel equation for which, + + .. math:: + K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x) + + as :math:`x \to \infty` [3]_. + + Parameters + ---------- + v : array_like of float + Order of Bessel functions + z : array_like of complex + Argument at which to evaluate the Bessel functions + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The results. Note that input must be of complex type to get complex + output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``. + + See Also + -------- + kve : This function with leading exponential behavior stripped off. + kvp : Derivative of this function + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the + algorithm used, see [2]_ and the references therein. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + .. [3] NIST Digital Library of Mathematical Functions, + Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3 + + Examples + -------- + Plot the function of several orders for real input: + + >>> import numpy as np + >>> from scipy.special import kv + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 5, 1000) + >>> for N in np.linspace(0, 6, 5): + ... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N)) + >>> plt.ylim(0, 10) + >>> plt.legend() + >>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$') + >>> plt.show() + + Calculate for a single value at multiple orders: + + >>> kv([4, 4.5, 5], 1+2j) + array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j]) + + """) + +add_newdoc("kve", + r""" + kve(v, z, out=None) + + Exponentially scaled modified Bessel function of the second kind. + + Returns the exponentially scaled, modified Bessel function of the + second kind (sometimes called the third kind) for real order `v` at + complex `z`:: + + kve(v, z) = kv(v, z) * exp(z) + + Parameters + ---------- + v : array_like of float + Order of Bessel functions + z : array_like of complex + Argument at which to evaluate the Bessel functions + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The exponentially scaled modified Bessel function of the second kind. + + See Also + -------- + kv : This function without exponential scaling. + k0e : Faster version of this function for order 0. + k1e : Faster version of this function for order 1. + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the + algorithm used, see [2]_ and the references therein. + + References + ---------- + .. [1] Donald E. 
Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + + Examples + -------- + In the following example `kv` returns 0 whereas `kve` still returns + a useful finite number. + + >>> import numpy as np + >>> from scipy.special import kv, kve + >>> import matplotlib.pyplot as plt + >>> kv(3, 1000.), kve(3, 1000.) + (0.0, 0.03980696128440973) + + Evaluate the function at one point for different orders by + providing a list or NumPy array as argument for the `v` parameter: + + >>> kve([0, 1, 1.5], 1.) + array([1.14446308, 1.63615349, 2.50662827]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> points = np.array([1., 3., 10.]) + >>> kve(0, points) + array([1.14446308, 0.6977616 , 0.39163193]) + + Evaluate the function at several points for different orders by + providing arrays for both `v` for `z`. Both arrays have to be + broadcastable to the correct shape. To calculate the orders 0, 1 + and 2 for a 1D array of points: + + >>> kve([[0], [1], [2]], points) + array([[1.14446308, 0.6977616 , 0.39163193], + [1.63615349, 0.80656348, 0.41076657], + [4.41677005, 1.23547058, 0.47378525]]) + + Plot the functions of order 0 to 3 from 0 to 5. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 5., 1000) + >>> for i in range(4): + ... ax.plot(x, kve(i, x), label=fr'$K_{i!r}(z)\cdot e^z$') + >>> ax.legend() + >>> ax.set_xlabel(r"$z$") + >>> ax.set_ylim(0, 4) + >>> ax.set_xlim(0, 5) + >>> plt.show() + """) + +add_newdoc("_lanczos_sum_expg_scaled", + """ + Internal function, do not use. + """) + +add_newdoc("_lgam1p", + """ + Internal function, do not use. + """) + +add_newdoc("log1p", + """ + log1p(x, out=None) + + Calculates log(1 + x) for use when `x` is near zero. + + Parameters + ---------- + x : array_like + Real or complex valued input. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of ``log(1 + x)``. + + See Also + -------- + expm1, cosm1 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using ``log(1 + x)`` directly for ``x`` + near 0. Note that in the below example ``1 + 1e-17 == 1`` to + double precision. + + >>> sc.log1p(1e-17) + 1e-17 + >>> np.log(1 + 1e-17) + 0.0 + + """) + +add_newdoc("_log1pmx", + """ + Internal function, do not use. + """) + +add_newdoc("lpmv", + r""" + lpmv(m, v, x, out=None) + + Associated Legendre function of integer order and real degree. + + Defined as + + .. math:: + + P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x) + + where + + .. math:: + + P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2} + \left(\frac{1 - x}{2}\right)^k + + is the Legendre function of the first kind. Here :math:`(\cdot)_k` + is the Pochhammer symbol; see `poch`. + + Parameters + ---------- + m : array_like + Order (int or float). If passed a float not equal to an + integer the function returns NaN. + v : array_like + Degree (float). + x : array_like + Argument (float). Must have ``|x| <= 1``. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + pmv : scalar or ndarray + Value of the associated Legendre function. 
+ + See Also + -------- + lpmn : Compute the associated Legendre function for all orders + ``0, ..., m`` and degrees ``0, ..., n``. + clpmn : Compute the associated Legendre function at complex + arguments. + + Notes + ----- + Note that this implementation includes the Condon-Shortley phase. + + References + ---------- + .. [1] Zhang, Jin, "Computation of Special Functions", John Wiley + and Sons, Inc, 1996. + + """) + +add_newdoc("modstruve", + r""" + modstruve(v, x, out=None) + + Modified Struve function. + + Return the value of the modified Struve function of order `v` at `x`. The + modified Struve function is defined as, + + .. math:: + L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x), + + where :math:`H_v` is the Struve function. + + Parameters + ---------- + v : array_like + Order of the modified Struve function (float). + x : array_like + Argument of the Struve function (float; must be positive unless `v` is + an integer). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + L : scalar or ndarray + Value of the modified Struve function of order `v` at `x`. + + See Also + -------- + struve + + Notes + ----- + Three methods discussed in [1]_ are used to evaluate the function: + + - power series + - expansion in Bessel functions (if :math:`|x| < |v| + 20`) + - asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`) + + Rounding errors are estimated based on the largest terms in the sums, and + the result associated with the smallest error is returned. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/11 + + Examples + -------- + Calculate the modified Struve function of order 1 at 2. + + >>> import numpy as np + >>> from scipy.special import modstruve + >>> import matplotlib.pyplot as plt + >>> modstruve(1, 2.) + 1.102759787367716 + + Calculate the modified Struve function at 2 for orders 1, 2 and 3 by + providing a list for the order parameter `v`. + + >>> modstruve([1, 2, 3], 2.) + array([1.10275979, 0.41026079, 0.11247294]) + + Calculate the modified Struve function of order 1 for several points + by providing an array for `x`. + + >>> points = np.array([2., 5., 8.]) + >>> modstruve(1, points) + array([ 1.10275979, 23.72821578, 399.24709139]) + + Compute the modified Struve function for several orders at several + points by providing arrays for `v` and `z`. The arrays have to be + broadcastable to the correct shapes. + + >>> orders = np.array([[1], [2], [3]]) + >>> points.shape, orders.shape + ((3,), (3, 1)) + + >>> modstruve(orders, points) + array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02], + [4.10260789e-01, 1.65535979e+01, 3.25973609e+02], + [1.12472937e-01, 9.42430454e+00, 2.33544042e+02]]) + + Plot the modified Struve functions of order 0 to 3 from -5 to 5. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-5., 5., 1000) + >>> for i in range(4): + ... ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$') + >>> ax.legend(ncol=2) + >>> ax.set_xlim(-5, 5) + >>> ax.set_title(r"Modified Struve functions $L_{\nu}$") + >>> plt.show() + """) + +add_newdoc("nbdtr", + r""" + nbdtr(k, n, p, out=None) + + Negative binomial cumulative distribution function. + + Returns the sum of the terms 0 through `k` of the negative binomial + distribution probability mass function, + + .. math:: + + F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j. 
+ + In a sequence of Bernoulli trials with individual success probabilities + `p`, this is the probability that `k` or fewer failures precede the nth + success. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + F : scalar or ndarray + The probability of `k` or fewer failures before `n` successes in a + sequence of events with individual success probability `p`. + + See Also + -------- + nbdtrc : Negative binomial survival function + nbdtrik : Negative binomial quantile function + scipy.stats.nbinom : Negative binomial distribution + + Notes + ----- + If floating point values are passed for `k` or `n`, they will be truncated + to integers. + + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1). + + Wrapper for the Cephes [1]_ routine `nbdtr`. + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtr` directly can improve performance + compared to the ``cdf`` method of `scipy.stats.nbinom` (see last example). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``. + + >>> import numpy as np + >>> from scipy.special import nbdtr + >>> nbdtr(10, 5, 0.5) + 0.940765380859375 + + Compute the function for ``n=10`` and ``p=0.5`` at several points by + providing a NumPy array or list for `k`. + + >>> nbdtr([5, 10, 15], 10, 0.5) + array([0.15087891, 0.58809853, 0.88523853]) + + Plot the function for four different parameter sets. + + >>> import matplotlib.pyplot as plt + >>> k = np.arange(130) + >>> n_parameters = [20, 20, 20, 80] + >>> p_parameters = [0.2, 0.5, 0.8, 0.5] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(p_parameters, n_parameters, + ... linestyles)) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> for parameter_set in parameters_list: + ... p, n, style = parameter_set + ... nbdtr_vals = nbdtr(k, n, p) + ... ax.plot(k, nbdtr_vals, label=rf"$n={n},\, p={p}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$k$") + >>> ax.set_title("Negative binomial cumulative distribution function") + >>> plt.show() + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtr` directly can be much faster than + calling the ``cdf`` method of `scipy.stats.nbinom`, especially for small + arrays or individual values. To get the same results one must use the + following parametrization: ``nbinom(n, p).cdf(k)=nbdtr(k, n, p)``. + + >>> from scipy.stats import nbinom + >>> k, n, p = 5, 3, 0.5 + >>> nbdtr_res = nbdtr(k, n, p) # this will often be faster than below + >>> stats_res = nbinom(n, p).cdf(k) + >>> stats_res, nbdtr_res # test that results are equal + (0.85546875, 0.85546875) + + `nbdtr` can evaluate different parameter sets by providing arrays with + shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute + the function for three different `k` at four locations `p`, resulting in + a 3x4 array. 
+ + >>> k = np.array([[5], [10], [15]]) + >>> p = np.array([0.3, 0.5, 0.7, 0.9]) + >>> k.shape, p.shape + ((3, 1), (4,)) + + >>> nbdtr(k, 5, p) + array([[0.15026833, 0.62304687, 0.95265101, 0.9998531 ], + [0.48450894, 0.94076538, 0.99932777, 0.99999999], + [0.76249222, 0.99409103, 0.99999445, 1. ]]) + """) + +add_newdoc("nbdtrc", + r""" + nbdtrc(k, n, p, out=None) + + Negative binomial survival function. + + Returns the sum of the terms `k + 1` to infinity of the negative binomial + distribution probability mass function, + + .. math:: + + F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j. + + In a sequence of Bernoulli trials with individual success probabilities + `p`, this is the probability that more than `k` failures precede the nth + success. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + F : scalar or ndarray + The probability of `k + 1` or more failures before `n` successes in a + sequence of events with individual success probability `p`. + + See Also + -------- + nbdtr : Negative binomial cumulative distribution function + nbdtrik : Negative binomial percentile function + scipy.stats.nbinom : Negative binomial distribution + + Notes + ----- + If floating point values are passed for `k` or `n`, they will be truncated + to integers. + + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n). + + Wrapper for the Cephes [1]_ routine `nbdtrc`. + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtrc` directly can improve performance + compared to the ``sf`` method of `scipy.stats.nbinom` (see last example). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``. + + >>> import numpy as np + >>> from scipy.special import nbdtrc + >>> nbdtrc(10, 5, 0.5) + 0.059234619140624986 + + Compute the function for ``n=10`` and ``p=0.5`` at several points by + providing a NumPy array or list for `k`. + + >>> nbdtrc([5, 10, 15], 10, 0.5) + array([0.84912109, 0.41190147, 0.11476147]) + + Plot the function for four different parameter sets. + + >>> import matplotlib.pyplot as plt + >>> k = np.arange(130) + >>> n_parameters = [20, 20, 20, 80] + >>> p_parameters = [0.2, 0.5, 0.8, 0.5] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(p_parameters, n_parameters, + ... linestyles)) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> for parameter_set in parameters_list: + ... p, n, style = parameter_set + ... nbdtrc_vals = nbdtrc(k, n, p) + ... ax.plot(k, nbdtrc_vals, label=rf"$n={n},\, p={p}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$k$") + >>> ax.set_title("Negative binomial distribution survival function") + >>> plt.show() + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtrc` directly can be much faster than + calling the ``sf`` method of `scipy.stats.nbinom`, especially for small + arrays or individual values. 
To get the same results one must use the
+    following parametrization: ``nbinom(n, p).sf(k)=nbdtrc(k, n, p)``.
+
+    >>> from scipy.stats import nbinom
+    >>> k, n, p = 3, 5, 0.5
+    >>> nbdtrc_res = nbdtrc(k, n, p)  # this will often be faster than below
+    >>> stats_res = nbinom(n, p).sf(k)
+    >>> stats_res, nbdtrc_res  # test that results are equal
+    (0.6367187499999999, 0.6367187499999999)
+
+    `nbdtrc` can evaluate different parameter sets by providing arrays with
+    shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute
+    the function for three different `k` at four locations `p`, resulting in
+    a 3x4 array.
+
+    >>> k = np.array([[5], [10], [15]])
+    >>> p = np.array([0.3, 0.5, 0.7, 0.9])
+    >>> k.shape, p.shape
+    ((3, 1), (4,))
+
+    >>> nbdtrc(k, 5, p)
+    array([[8.49731667e-01, 3.76953125e-01, 4.73489874e-02, 1.46902600e-04],
+           [5.15491059e-01, 5.92346191e-02, 6.72234070e-04, 9.29610100e-09],
+           [2.37507779e-01, 5.90896606e-03, 5.55025308e-06, 3.26346760e-13]])
+    """)
+
+add_newdoc(
+    "nbdtri",
+    r"""
+    nbdtri(k, n, y, out=None)
+
+    Returns the inverse with respect to the parameter `p` of
+    `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
+    function.
+
+    Parameters
+    ----------
+    k : array_like
+        The maximum number of allowed failures (nonnegative int).
+    n : array_like
+        The target number of successes (positive int).
+    y : array_like
+        The probability of `k` or fewer failures before `n` successes (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    p : scalar or ndarray
+        Probability of success in a single event (float) such that
+        `nbdtr(k, n, p) = y`.
+
+    See Also
+    --------
+    nbdtr : Cumulative distribution function of the negative binomial.
+    nbdtrc : Negative binomial survival function.
+    nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
+    nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
+    scipy.stats.nbinom : Negative binomial distribution
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `nbdtri`.
+
+    The negative binomial distribution is also available as
+    `scipy.stats.nbinom`. Using `nbdtri` directly can improve performance
+    compared to the ``ppf`` method of `scipy.stats.nbinom`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    `nbdtri` is the inverse of `nbdtr` with respect to `p`.
+    Up to floating point errors the following holds:
+    ``nbdtri(k, n, nbdtr(k, n, p))=p``.
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtri, nbdtr
+    >>> k, n, p = 5, 10, 0.2
+    >>> cdf_val = nbdtr(k, n, p)
+    >>> nbdtri(k, n, cdf_val)
+    0.20000000000000004
+
+    Compute the function for ``k=3`` and ``n=5`` at several points by
+    providing a NumPy array or list for `y`.
+
+    >>> y = np.array([0.1, 0.4, 0.8])
+    >>> nbdtri(3, 5, y)
+    array([0.34462319, 0.51653095, 0.69677416])
+
+    Plot the function for four different parameter sets.
+
+    >>> import matplotlib.pyplot as plt
+    >>> n_parameters = [5, 20, 30, 30]
+    >>> k_parameters = [20, 20, 60, 80]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(n_parameters, k_parameters, linestyles))
+    >>> cdf_vals = np.linspace(0, 1, 1000)
+    >>> fig, ax = plt.subplots(figsize=(8, 8))
+    >>> for parameter_set in parameters_list:
+    ...     n, k, style = parameter_set
+    ...     nbdtri_vals = nbdtri(k, n, cdf_vals)
+    ...     ax.plot(cdf_vals, nbdtri_vals, label=rf"$k={k},\ n={n}$",
+    ...             ls=style)
+    >>> ax.legend()
+    >>> ax.set_ylabel("$p$")
+    >>> ax.set_xlabel("$CDF$")
+    >>> title = "nbdtri: inverse of negative binomial CDF with respect to $p$"
+    >>> ax.set_title(title)
+    >>> plt.show()
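+
+    Since `nbdtr` is computed via the regularized incomplete beta function
+    (see the Notes of `nbdtr`), one would expect its inverse with respect
+    to `p` to agree with `scipy.special.betaincinv`. The following
+    cross-check is a sketch that assumes the implied relationship
+    ``nbdtri(k, n, y) == betaincinv(n, k + 1, y)``; it is not a documented
+    equivalence.
+
+    >>> from scipy.special import betaincinv
+    >>> bool(np.isclose(nbdtri(3, 5, 0.5), betaincinv(5, 4, 0.5)))
+    True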
+
+    `nbdtri` can evaluate different parameter sets by providing arrays with
+    shapes compatible for broadcasting for `k`, `n` and `y`. Here we compute
+    the function for three different `k` at four locations `y`, resulting in
+    a 3x4 array.
+
+    >>> k = np.array([[5], [10], [15]])
+    >>> y = np.array([0.3, 0.5, 0.7, 0.9])
+    >>> k.shape, y.shape
+    ((3, 1), (4,))
+
+    >>> nbdtri(k, 5, y)
+    array([[0.37258157, 0.45169416, 0.53249956, 0.64578407],
+           [0.24588501, 0.30451981, 0.36778453, 0.46397088],
+           [0.18362101, 0.22966758, 0.28054743, 0.36066188]])
+    """)
+
+add_newdoc("nbdtrik",
+    r"""
+    nbdtrik(y, n, p, out=None)
+
+    Negative binomial percentile function.
+
+    Returns the inverse with respect to the parameter `k` of
+    `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
+    function.
+
+    Parameters
+    ----------
+    y : array_like
+        The probability of `k` or fewer failures before `n` successes (float).
+    n : array_like
+        The target number of successes (positive int).
+    p : array_like
+        Probability of success in a single event (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    k : scalar or ndarray
+        The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
+
+    See Also
+    --------
+    nbdtr : Cumulative distribution function of the negative binomial.
+    nbdtrc : Survival function of the negative binomial.
+    nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
+    nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
+    scipy.stats.nbinom : Negative binomial distribution
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
+
+    Formula 26.5.26 of [2]_,
+
+    .. math::
+        \sum_{j=k + 1}^\infty {{n + j - 1}
+        \choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
+
+    is used to reduce calculation of the cumulative distribution function to
+    that of a regularized incomplete beta :math:`I`.
+
+    Computation of `k` involves a search for a value that produces the desired
+    value of `y`. The search relies on the monotonicity of `y` with `k`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    Compute the negative binomial cumulative distribution function for an
+    exemplary parameter set.
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtr, nbdtrik
+    >>> k, n, p = 5, 2, 0.5
+    >>> cdf_value = nbdtr(k, n, p)
+    >>> cdf_value
+    0.9375
+
+    Verify that `nbdtrik` recovers the original value for `k`.
+
+    >>> nbdtrik(cdf_value, n, p)
+    5.0
+
+    Plot the function for different parameter sets.
+
+    >>> import matplotlib.pyplot as plt
+    >>> p_parameters = [0.2, 0.5, 0.7, 0.5]
+    >>> n_parameters = [30, 30, 30, 80]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(p_parameters, n_parameters, linestyles))
+    >>> cdf_vals = np.linspace(0, 1, 1000)
+    >>> fig, ax = plt.subplots(figsize=(8, 8))
+    >>> for parameter_set in parameters_list:
+    ...     p, n, style = parameter_set
+    ...     nbdtrik_vals = nbdtrik(cdf_vals, n, p)
+    ...     
ax.plot(cdf_vals, nbdtrik_vals, label=rf"$n={n},\ p={p}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_ylabel("$k$") + >>> ax.set_xlabel("$CDF$") + >>> ax.set_title("Negative binomial percentile function") + >>> plt.show() + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. The percentile function method ``ppf`` + returns the result of `nbdtrik` rounded up to integers: + + >>> from scipy.stats import nbinom + >>> q, n, p = 0.6, 5, 0.5 + >>> nbinom.ppf(q, n, p), nbdtrik(q, n, p) + (5.0, 4.800428460273882) + + """) + +add_newdoc("nbdtrin", + r""" + nbdtrin(k, y, p, out=None) + + Inverse of `nbdtr` vs `n`. + + Returns the inverse with respect to the parameter `n` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + y : array_like + The probability of `k` or fewer failures before `n` successes (float). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + n : scalar or ndarray + The number of successes `n` such that `nbdtr(k, n, p) = y`. + + See Also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. + nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. + + Formula 26.5.26 of [2]_, + + .. math:: + \sum_{j=k + 1}^\infty {{n + j - 1} + \choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), + + is used to reduce calculation of the cumulative distribution function to + that of a regularized incomplete beta :math:`I`. + + Computation of `n` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `n`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + Compute the negative binomial cumulative distribution function for an + exemplary parameter set. + + >>> from scipy.special import nbdtr, nbdtrin + >>> k, n, p = 5, 2, 0.5 + >>> cdf_value = nbdtr(k, n, p) + >>> cdf_value + 0.9375 + + Verify that `nbdtrin` recovers the original value for `n` up to floating + point accuracy. + + >>> nbdtrin(k, cdf_value, p) + 1.999999999998137 + """) + +add_newdoc("ncfdtr", + r""" + ncfdtr(dfn, dfd, nc, f, out=None) + + Cumulative distribution function of the non-central F distribution. + + The non-central F describes the distribution of, + + .. math:: + Z = \frac{X/d_n}{Y/d_d} + + where :math:`X` and :math:`Y` are independently distributed, with + :math:`X` distributed non-central :math:`\chi^2` with noncentrality + parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y` + distributed :math:`\chi^2` with :math:`d_d` degrees of freedom. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : array_like + Quantiles, i.e. the upper limit of integration. 
+ out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cdf : scalar or ndarray + The calculated CDF. If all inputs are scalar, the return will be a + float. Otherwise it will be an array. + + See Also + -------- + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`. + + The cumulative distribution function is computed using Formula 26.6.20 of + [2]_: + + .. math:: + F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} + \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}), + + where :math:`I` is the regularized incomplete beta function, and + :math:`x = f d_n/(f d_n + d_d)`. + + The computation time required for this routine is proportional to the + noncentrality parameter `nc`. Very large values of this parameter can + consume immense computer resources. This is why the search range is + bounded by 10,000. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Plot the CDF of the non-central F distribution, for nc=0. Compare with the + F-distribution from scipy.stats: + + >>> x = np.linspace(-1, 8, num=500) + >>> dfn = 3 + >>> dfd = 2 + >>> ncf_stats = stats.f.cdf(x, dfn, dfd) + >>> ncf_special = special.ncfdtr(dfn, dfd, 0, x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, ncf_stats, 'b-', lw=3) + >>> ax.plot(x, ncf_special, 'r-') + >>> plt.show() + + """) + +add_newdoc("ncfdtri", + """ + ncfdtri(dfn, dfd, nc, p, out=None) + + Inverse with respect to `f` of the CDF of the non-central F distribution. + + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + f : scalar or ndarray + Quantiles, i.e., the upper limit of integration. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtri + + Compute the CDF for several values of `f`: + + >>> f = [0.5, 1, 1.5] + >>> p = ncfdtr(2, 3, 1.5, f) + >>> p + array([ 0.20782291, 0.36107392, 0.47345752]) + + Compute the inverse. We recover the values of `f`, as expected: + + >>> ncfdtri(2, 3, 1.5, p) + array([ 0.5, 1. 
, 1.5]) + + """) + +add_newdoc("ncfdtridfd", + """ + ncfdtridfd(dfn, p, nc, f, out=None) + + Calculate degrees of freedom (denominator) for the noncentral F-distribution. + + This is the inverse with respect to `dfd` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : array_like + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + dfd : scalar or ndarray + Degrees of freedom of the denominator sum of squares. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + The value of the cumulative noncentral F distribution is not necessarily + monotone in either degrees of freedom. There thus may be two values that + provide a given CDF value. This routine assumes monotonicity and will + find an arbitrary one of the two values. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtridfd + + Compute the CDF for several values of `dfd`: + + >>> dfd = [1, 2, 3] + >>> p = ncfdtr(2, dfd, 0.25, 15) + >>> p + array([ 0.8097138 , 0.93020416, 0.96787852]) + + Compute the inverse. We recover the values of `dfd`, as expected: + + >>> ncfdtridfd(2, p, 0.25, 15) + array([ 1., 2., 3.]) + + """) + +add_newdoc("ncfdtridfn", + """ + ncfdtridfn(p, dfd, nc, f, out=None) + + Calculate degrees of freedom (numerator) for the noncentral F-distribution. + + This is the inverse with respect to `dfn` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : float + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + dfn : scalar or ndarray + Degrees of freedom of the numerator sum of squares. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + The value of the cumulative noncentral F distribution is not necessarily + monotone in either degrees of freedom. There thus may be two values that + provide a given CDF value. This routine assumes monotonicity and will + find an arbitrary one of the two values. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtridfn + + Compute the CDF for several values of `dfn`: + + >>> dfn = [1, 2, 3] + >>> p = ncfdtr(dfn, 2, 0.25, 15) + >>> p + array([ 0.92562363, 0.93020416, 0.93188394]) + + Compute the inverse. We recover the values of `dfn`, as expected: + + >>> ncfdtridfn(p, 2, 0.25, 15) + array([ 1., 2., 3.]) + + """) + +add_newdoc("ncfdtrinc", + """ + ncfdtrinc(dfn, dfd, p, f, out=None) + + Calculate non-centrality parameter for non-central F distribution. 
+
+    This is the inverse with respect to `nc` of `ncfdtr`.
+    See `ncfdtr` for more details.
+
+    Parameters
+    ----------
+    dfn : array_like
+        Degrees of freedom of the numerator sum of squares. Range (0, inf).
+    dfd : array_like
+        Degrees of freedom of the denominator sum of squares. Range (0, inf).
+    p : array_like
+        Value of the cumulative distribution function. Must be in the
+        range [0, 1].
+    f : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Noncentrality parameter.
+
+    See Also
+    --------
+    ncfdtr : CDF of the non-central F distribution.
+    ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
+    ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
+    ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
+
+    Examples
+    --------
+    >>> from scipy.special import ncfdtr, ncfdtrinc
+
+    Compute the CDF for several values of `nc`:
+
+    >>> nc = [0.5, 1.5, 2.0]
+    >>> p = ncfdtr(2, 3, nc, 15)
+    >>> p
+    array([ 0.96309246,  0.94327955,  0.93304098])
+
+    Compute the inverse. We recover the values of `nc`, as expected:
+
+    >>> ncfdtrinc(2, 3, p, 15)
+    array([ 0.5,  1.5,  2. ])
+
+    """)
+
+add_newdoc("nctdtr",
+    """
+    nctdtr(df, nc, t, out=None)
+
+    Cumulative distribution function of the non-central `t` distribution.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution. Should be in range (0, inf).
+    nc : array_like
+        Noncentrality parameter. Should be in range (-1e6, 1e6).
+    t : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    cdf : scalar or ndarray
+        The calculated CDF. If all inputs are scalar, the return will be a
+        float. Otherwise, it will be an array.
+
+    See Also
+    --------
+    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
+    nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
+    nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Plot the CDF of the non-central t distribution, for nc=0. Compare with the
+    t-distribution from scipy.stats:
+
+    >>> x = np.linspace(-5, 5, num=500)
+    >>> df = 3
+    >>> nct_stats = stats.t.cdf(x, df)
+    >>> nct_special = special.nctdtr(df, 0, x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, nct_stats, 'b-', lw=3)
+    >>> ax.plot(x, nct_special, 'r-')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("nctdtridf",
+    """
+    nctdtridf(p, nc, t, out=None)
+
+    Calculate degrees of freedom for non-central t distribution.
+
+    See `nctdtr` for more details.
+
+    Parameters
+    ----------
+    p : array_like
+        CDF values, in range (0, 1].
+    nc : array_like
+        Noncentrality parameter. Should be in range (-1e6, 1e6).
+    t : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    df : scalar or ndarray
+        The degrees of freedom. If all inputs are scalar, the return will be a
+        float. Otherwise, it will be an array.
+
+    See Also
+    --------
+    nctdtr : CDF of the non-central `t` distribution.
+    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
+    nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values.
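+
+    Notes
+    -----
+    As with the other CDFLIB-based inverses above (compare the notes of
+    `nbdtrik` and `nbdtrin`), `df` is presumably found by a numerical
+    search for a value that reproduces the requested CDF value, so the
+    result is only as accurate as that search. A round-trip sketch with a
+    non-integer `df` (the tolerance is an assumption, not a documented
+    guarantee):
+
+    >>> import numpy as np
+    >>> from scipy.special import nctdtr, nctdtridf
+    >>> df = 2.5
+    >>> p = nctdtr(df, 0.25, 1)
+    >>> bool(np.isclose(nctdtridf(p, 0.25, 1), df))
+    True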
+
+    Examples
+    --------
+    >>> from scipy.special import nctdtr, nctdtridf
+
+    Compute the CDF for several values of `df`:
+
+    >>> df = [1, 2, 3]
+    >>> p = nctdtr(df, 0.25, 1)
+    >>> p
+    array([0.67491974, 0.716464  , 0.73349456])
+
+    Compute the inverse. We recover the values of `df`, as expected:
+
+    >>> nctdtridf(p, 0.25, 1)
+    array([1., 2., 3.])
+
+    """)
+
+add_newdoc("nctdtrinc",
+    """
+    nctdtrinc(df, p, t, out=None)
+
+    Calculate non-centrality parameter for non-central t distribution.
+
+    See `nctdtr` for more details.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution. Should be in range (0, inf).
+    p : array_like
+        CDF values, in range (0, 1].
+    t : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Noncentrality parameter
+
+    See Also
+    --------
+    nctdtr : CDF of the non-central `t` distribution.
+    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
+    nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
+
+    Examples
+    --------
+    >>> from scipy.special import nctdtr, nctdtrinc
+
+    Compute the CDF for several values of `nc`:
+
+    >>> nc = [0.5, 1.5, 2.5]
+    >>> p = nctdtr(3, nc, 1.5)
+    >>> p
+    array([0.77569497, 0.45524533, 0.1668691 ])
+
+    Compute the inverse. We recover the values of `nc`, as expected:
+
+    >>> nctdtrinc(3, p, 1.5)
+    array([0.5, 1.5, 2.5])
+
+    """)
+
+add_newdoc("nctdtrit",
+    """
+    nctdtrit(df, nc, p, out=None)
+
+    Inverse cumulative distribution function of the non-central t distribution.
+
+    See `nctdtr` for more details.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution. Should be in range (0, inf).
+    nc : array_like
+        Noncentrality parameter. Should be in range (-1e6, 1e6).
+    p : array_like
+        CDF values, in range (0, 1].
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    t : scalar or ndarray
+        Quantiles
+
+    See Also
+    --------
+    nctdtr : CDF of the non-central `t` distribution.
+    nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
+    nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values.
+
+    Examples
+    --------
+    >>> from scipy.special import nctdtr, nctdtrit
+
+    Compute the CDF for several values of `t`:
+
+    >>> t = [0.5, 1, 1.5]
+    >>> p = nctdtr(3, 1, t)
+    >>> p
+    array([0.29811049, 0.46922687, 0.6257559 ])
+
+    Compute the inverse. We recover the values of `t`, as expected:
+
+    >>> nctdtrit(3, 1, p)
+    array([0.5, 1. , 1.5])
+
+    """)
+
+add_newdoc("ndtr",
+    r"""
+    ndtr(x, out=None)
+
+    Cumulative distribution of the standard normal distribution.
+
+    Returns the area under the standard Gaussian probability
+    density function, integrated from minus infinity to `x`
+
+    .. math::
+
+       \frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
+
+    Parameters
+    ----------
+    x : array_like, real or complex
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value of the normal CDF evaluated at `x`
+
+    See Also
+    --------
+    log_ndtr : Logarithm of ndtr
+    ndtri : Inverse of ndtr, standard normal percentile function
+    erf : Error function
+    erfc : 1 - erf
+    scipy.stats.norm : Normal distribution
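+
+    Notes
+    -----
+    For real `x`, `ndtr` satisfies the standard identity
+    ``ndtr(x) = 0.5 * erfc(-x / sqrt(2))``. The snippet below uses that
+    identity as a quick sanity check; it is a sketch based on the
+    mathematical relation, not on the implementation of this wrapper.
+
+    >>> import numpy as np
+    >>> from scipy.special import erfc, ndtr
+    >>> x = 0.5
+    >>> bool(np.isclose(ndtr(x), 0.5 * erfc(-x / np.sqrt(2))))
+    True
+
+    Examples
+    --------
+    Evaluate `ndtr` at one point.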
+ + >>> import numpy as np + >>> from scipy.special import ndtr + >>> ndtr(0.5) + 0.6914624612740131 + + Evaluate the function at several points by providing a NumPy array + or list for `x`. + + >>> ndtr([0, 0.5, 2]) + array([0.5 , 0.69146246, 0.97724987]) + + Plot the function. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-5, 5, 100) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, ndtr(x)) + >>> ax.set_title(r"Standard normal cumulative distribution function $\Phi$") + >>> plt.show() + """) + + +add_newdoc("nrdtrimn", + """ + nrdtrimn(p, std, x, out=None) + + Calculate mean of normal distribution given other params. + + Parameters + ---------- + p : array_like + CDF values, in range (0, 1]. + std : array_like + Standard deviation. + x : array_like + Quantiles, i.e. the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + mn : scalar or ndarray + The mean of the normal distribution. + + See Also + -------- + scipy.stats.norm : Normal distribution + ndtr : Standard normal cumulative probability distribution + ndtri : Inverse of standard normal CDF with respect to quantile + nrdtrisd : Inverse of normal distribution CDF with respect to + standard deviation + + Examples + -------- + `nrdtrimn` can be used to recover the mean of a normal distribution + if we know the CDF value `p` for a given quantile `x` and the + standard deviation `std`. First, we calculate + the normal distribution CDF for an exemplary parameter set. + + >>> from scipy.stats import norm + >>> mean = 3. + >>> std = 2. + >>> x = 6. + >>> p = norm.cdf(x, loc=mean, scale=std) + >>> p + 0.9331927987311419 + + Verify that `nrdtrimn` returns the original value for `mean`. + + >>> from scipy.special import nrdtrimn + >>> nrdtrimn(p, std, x) + 3.0000000000000004 + + """) + +add_newdoc("nrdtrisd", + """ + nrdtrisd(mn, p, x, out=None) + + Calculate standard deviation of normal distribution given other params. + + Parameters + ---------- + mn : scalar or ndarray + The mean of the normal distribution. + p : array_like + CDF values, in range (0, 1]. + x : array_like + Quantiles, i.e. the upper limit of integration. + + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + std : scalar or ndarray + Standard deviation. + + See Also + -------- + scipy.stats.norm : Normal distribution + ndtr : Standard normal cumulative probability distribution + ndtri : Inverse of standard normal CDF with respect to quantile + nrdtrimn : Inverse of normal distribution CDF with respect to + mean + + Examples + -------- + `nrdtrisd` can be used to recover the standard deviation of a normal + distribution if we know the CDF value `p` for a given quantile `x` and + the mean `mn`. First, we calculate the normal distribution CDF for an + exemplary parameter set. + + >>> from scipy.stats import norm + >>> mean = 3. + >>> std = 2. + >>> x = 6. + >>> p = norm.cdf(x, loc=mean, scale=std) + >>> p + 0.9331927987311419 + + Verify that `nrdtrisd` returns the original value for `std`. + + >>> from scipy.special import nrdtrisd + >>> nrdtrisd(mean, p, x) + 2.0000000000000004 + + """) + +add_newdoc("log_ndtr", + """ + log_ndtr(x, out=None) + + Logarithm of Gaussian cumulative distribution function. 
+
+    Returns the log of the area under the standard Gaussian probability
+    density function, integrated from minus infinity to `x`::
+
+        log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
+
+    Parameters
+    ----------
+    x : array_like, real or complex
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value of the log of the normal CDF evaluated at `x`
+
+    See Also
+    --------
+    erf
+    erfc
+    scipy.stats.norm
+    ndtr
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import log_ndtr, ndtr
+
+    The benefit of ``log_ndtr(x)`` over the naive implementation
+    ``np.log(ndtr(x))`` is most evident with moderate to large positive
+    values of ``x``:
+
+    >>> x = np.array([6, 7, 9, 12, 15, 25])
+    >>> log_ndtr(x)
+    array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019,
+           -1.77648211e-033, -3.67096620e-051, -3.05669671e-138])
+
+    The results of the naive calculation for the moderate ``x`` values
+    have only 5 or 6 correct significant digits. For values of ``x``
+    greater than approximately 8.3, the naive expression returns 0:
+
+    >>> np.log(ndtr(x))
+    array([-9.86587701e-10, -1.27986510e-12,  0.00000000e+00,
+            0.00000000e+00,  0.00000000e+00,  0.00000000e+00])
+    """)
+
+add_newdoc("ndtri",
+    """
+    ndtri(y, out=None)
+
+    Inverse of `ndtr` vs x
+
+    Returns the argument x for which the area under the standard normal
+    probability density function (integrated from minus infinity to `x`)
+    is equal to y.
+
+    Parameters
+    ----------
+    y : array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value of x such that ``ndtr(x) == y``.
+
+    See Also
+    --------
+    ndtr : Standard normal cumulative probability distribution
+    ndtri_exp : Inverse of log_ndtr
+
+    Examples
+    --------
+    `ndtri` is the percentile function of the standard normal distribution.
+    This means it returns the inverse of the cumulative distribution
+    function `ndtr`. First, let us compute a cumulative distribution
+    function value.
+
+    >>> import numpy as np
+    >>> from scipy.special import ndtri, ndtr
+    >>> cdf_val = ndtr(2)
+    >>> cdf_val
+    0.9772498680518208
+
+    Verify that `ndtri` yields the original value for `x` up to floating
+    point errors.
+
+    >>> ndtri(cdf_val)
+    2.0000000000000004
+
+    Plot the function. For that purpose, we provide a NumPy array as argument.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0.01, 1, 200)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, ndtri(x))
+    >>> ax.set_title("Standard normal percentile function")
+    >>> plt.show()
+    """)
+
+add_newdoc("pdtr",
+    r"""
+    pdtr(k, m, out=None)
+
+    Poisson cumulative distribution function.
+
+    Defined as the probability that a Poisson-distributed random
+    variable with event rate :math:`m` is less than or equal to
+    :math:`k`. More concretely, this works out to be [1]_
+
+    .. math::
+
+       \exp(-m) \sum_{j = 0}^{\lfloor{k}\rfloor} \frac{m^j}{j!}.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of occurrences (nonnegative, real)
+    m : array_like
+        Shape parameter (nonnegative, real)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Poisson cumulative distribution function
+
+    See Also
+    --------
+    pdtrc : Poisson survival function
+    pdtrik : inverse of `pdtr` with respect to `k`
+    pdtri : inverse of `pdtr` with respect to `m`
+
+    References
+    ----------
+    .. 
[1] https://en.wikipedia.org/wiki/Poisson_distribution + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is a cumulative distribution function, so it converges to 1 + monotonically as `k` goes to infinity. + + >>> sc.pdtr([1, 10, 100, np.inf], 1) + array([0.73575888, 0.99999999, 1. , 1. ]) + + It is discontinuous at integers and constant between integers. + + >>> sc.pdtr([1, 1.5, 1.9, 2], 1) + array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ]) + + """) + +add_newdoc("pdtrc", + """ + pdtrc(k, m, out=None) + + Poisson survival function + + Returns the sum of the terms from k+1 to infinity of the Poisson + distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc( + k+1, m). Arguments must both be non-negative doubles. + + Parameters + ---------- + k : array_like + Number of occurrences (nonnegative, real) + m : array_like + Shape parameter (nonnegative, real) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the Poisson survival function + + See Also + -------- + pdtr : Poisson cumulative distribution function + pdtrik : inverse of `pdtr` with respect to `k` + pdtri : inverse of `pdtr` with respect to `m` + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is a survival function, so it decreases to 0 + monotonically as `k` goes to infinity. + + >>> k = np.array([1, 10, 100, np.inf]) + >>> sc.pdtrc(k, 1) + array([2.64241118e-001, 1.00477664e-008, 3.94147589e-161, 0.00000000e+000]) + + It can be expressed in terms of the lower incomplete gamma + function `gammainc`. + + >>> sc.gammainc(k + 1, 1) + array([2.64241118e-001, 1.00477664e-008, 3.94147589e-161, 0.00000000e+000]) + + """) + +add_newdoc("pdtri", + """ + pdtri(k, y, out=None) + + Inverse to `pdtr` vs m + + Returns the Poisson variable `m` such that the sum from 0 to `k` of + the Poisson density is equal to the given probability `y`: + calculated by ``gammaincinv(k + 1, y)``. `k` must be a nonnegative + integer and `y` between 0 and 1. + + Parameters + ---------- + k : array_like + Number of occurrences (nonnegative, real) + y : array_like + Probability + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the shape parameter `m` such that ``pdtr(k, m) = p`` + + See Also + -------- + pdtr : Poisson cumulative distribution function + pdtrc : Poisson survival function + pdtrik : inverse of `pdtr` with respect to `k` + + Examples + -------- + >>> import scipy.special as sc + + Compute the CDF for several values of `m`: + + >>> m = [0.5, 1, 1.5] + >>> p = sc.pdtr(1, m) + >>> p + array([0.90979599, 0.73575888, 0.5578254 ]) + + Compute the inverse. We recover the values of `m`, as expected: + + >>> sc.pdtri(1, p) + array([0.5, 1. , 1.5]) + + """) + +add_newdoc("pdtrik", + """ + pdtrik(p, m, out=None) + + Inverse to `pdtr` vs `k`. 
+ + Parameters + ---------- + p : array_like + Probability + m : array_like + Shape parameter (nonnegative, real) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The number of occurrences `k` such that ``pdtr(k, m) = p`` + + See Also + -------- + pdtr : Poisson cumulative distribution function + pdtrc : Poisson survival function + pdtri : inverse of `pdtr` with respect to `m` + + Examples + -------- + >>> import scipy.special as sc + + Compute the CDF for several values of `k`: + + >>> k = [1, 2, 3] + >>> p = sc.pdtr(k, 2) + >>> p + array([0.40600585, 0.67667642, 0.85712346]) + + Compute the inverse. We recover the values of `k`, as expected: + + >>> sc.pdtrik(p, 2) + array([1., 2., 3.]) + + """) + +add_newdoc("poch", + r""" + poch(z, m, out=None) + + Pochhammer symbol. + + The Pochhammer symbol (rising factorial) is defined as + + .. math:: + + (z)_m = \frac{\Gamma(z + m)}{\Gamma(z)} + + For positive integer `m` it reads + + .. math:: + + (z)_m = z (z + 1) ... (z + m - 1) + + See [dlmf]_ for more details. + + Parameters + ---------- + z, m : array_like + Real-valued arguments. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value of the function. + + References + ---------- + .. [dlmf] Nist, Digital Library of Mathematical Functions + https://dlmf.nist.gov/5.2#iii + + Examples + -------- + >>> import scipy.special as sc + + It is 1 when m is 0. + + >>> sc.poch([1, 2, 3, 4], 0) + array([1., 1., 1., 1.]) + + For z equal to 1 it reduces to the factorial function. + + >>> sc.poch(1, 5) + 120.0 + >>> 1 * 2 * 3 * 4 * 5 + 120 + + It can be expressed in terms of the gamma function. + + >>> z, m = 3.7, 2.1 + >>> sc.poch(z, m) + 20.529581933776953 + >>> sc.gamma(z + m) / sc.gamma(z) + 20.52958193377696 + + """) + +add_newdoc("powm1", """ + powm1(x, y, out=None) + + Computes ``x**y - 1``. + + This function is useful when `y` is near 0, or when `x` is near 1. + + The function is implemented for real types only (unlike ``numpy.power``, + which accepts complex inputs). + + Parameters + ---------- + x : array_like + The base. Must be a real type (i.e. integer or float, not complex). + y : array_like + The exponent. Must be a real type (i.e. integer or float, not complex). + + Returns + ------- + array_like + Result of the calculation + + Notes + ----- + .. versionadded:: 1.10.0 + + The underlying code is implemented for single precision and double + precision floats only. Unlike `numpy.power`, integer inputs to + `powm1` are converted to floating point, and complex inputs are + not accepted. + + Note the following edge cases: + + * ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf`` + and ``nan``. + * ``powm1(1, y)`` returns 0 for any ``y``, including ``nan`` + and ``inf``. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import powm1 + + >>> x = np.array([1.2, 10.0, 0.9999999975]) + >>> y = np.array([1e-9, 1e-11, 0.1875]) + >>> powm1(x, y) + array([ 1.82321557e-10, 2.30258509e-11, -4.68749998e-10]) + + It can be verified that the relative errors in those results + are less than 2.5e-16. + + Compare that to the result of ``x**y - 1``, where the + relative errors are all larger than 8e-8: + + >>> x**y - 1 + array([ 1.82321491e-10, 2.30258035e-11, -4.68750039e-10]) + + """) + + +add_newdoc("pseudo_huber", + r""" + pseudo_huber(delta, r, out=None) + + Pseudo-Huber loss function. + + .. 
math:: \mathrm{pseudo\_huber}(\delta, r) = + \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right) + + Parameters + ---------- + delta : array_like + Input array, indicating the soft quadratic vs. linear loss changepoint. + r : array_like + Input array, possibly representing residuals. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + res : scalar or ndarray + The computed Pseudo-Huber loss function values. + + See Also + -------- + huber: Similar function which this function approximates + + Notes + ----- + Like `huber`, `pseudo_huber` often serves as a robust loss function + in statistics or machine learning to reduce the influence of outliers. + Unlike `huber`, `pseudo_huber` is smooth. + + Typically, `r` represents residuals, the difference + between a model prediction and data. Then, for :math:`|r|\leq\delta`, + `pseudo_huber` resembles the squared error and for :math:`|r|>\delta` the + absolute error. This way, the Pseudo-Huber loss often achieves + a fast convergence in model fitting for small residuals like the squared + error loss function and still reduces the influence of outliers + (:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is + the cutoff between squared and absolute error regimes, it has + to be tuned carefully for each problem. `pseudo_huber` is also + convex, making it suitable for gradient based optimization. [1]_ [2]_ + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Hartley, Zisserman, "Multiple View Geometry in Computer Vision". + 2003. Cambridge University Press. p. 619 + .. [2] Charbonnier et al. "Deterministic edge-preserving regularization + in computed imaging". 1997. IEEE Trans. Image Processing. + 6 (2): 298 - 311. + + Examples + -------- + Import all necessary modules. + + >>> import numpy as np + >>> from scipy.special import pseudo_huber, huber + >>> import matplotlib.pyplot as plt + + Calculate the function for ``delta=1`` at ``r=2``. + + >>> pseudo_huber(1., 2.) + 1.2360679774997898 + + Calculate the function at ``r=2`` for different `delta` by providing + a list or NumPy array for `delta`. + + >>> pseudo_huber([1., 2., 4.], 3.) + array([2.16227766, 3.21110255, 4. ]) + + Calculate the function for ``delta=1`` at several points by providing + a list or NumPy array for `r`. + + >>> pseudo_huber(2., np.array([1., 1.5, 3., 4.])) + array([0.47213595, 1. , 3.21110255, 4.94427191]) + + The function can be calculated for different `delta` and `r` by + providing arrays for both with compatible shapes for broadcasting. + + >>> r = np.array([1., 2.5, 8., 10.]) + >>> deltas = np.array([[1.], [5.], [9.]]) + >>> print(r.shape, deltas.shape) + (4,) (3, 1) + + >>> pseudo_huber(deltas, r) + array([[ 0.41421356, 1.6925824 , 7.06225775, 9.04987562], + [ 0.49509757, 2.95084972, 22.16990566, 30.90169944], + [ 0.49846624, 3.06693762, 27.37435121, 40.08261642]]) + + Plot the function for different `delta`. + + >>> x = np.linspace(-4, 4, 500) + >>> deltas = [1, 2, 3] + >>> linestyles = ["dashed", "dotted", "dashdot"] + >>> fig, ax = plt.subplots() + >>> combined_plot_parameters = list(zip(deltas, linestyles)) + >>> for delta, style in combined_plot_parameters: + ... ax.plot(x, pseudo_huber(delta, x), label=rf"$\delta={delta}$", + ... 
ls=style) + >>> ax.legend(loc="upper center") + >>> ax.set_xlabel("$x$") + >>> ax.set_title(r"Pseudo-Huber loss function $h_{\delta}(x)$") + >>> ax.set_xlim(-4, 4) + >>> ax.set_ylim(0, 8) + >>> plt.show() + + Finally, illustrate the difference between `huber` and `pseudo_huber` by + plotting them and their gradients with respect to `r`. The plot shows + that `pseudo_huber` is continuously differentiable while `huber` is not + at the points :math:`\pm\delta`. + + >>> def huber_grad(delta, x): + ... grad = np.copy(x) + ... linear_area = np.argwhere(np.abs(x) > delta) + ... grad[linear_area]=delta*np.sign(x[linear_area]) + ... return grad + >>> def pseudo_huber_grad(delta, x): + ... return x* (1+(x/delta)**2)**(-0.5) + >>> x=np.linspace(-3, 3, 500) + >>> delta = 1. + >>> fig, ax = plt.subplots(figsize=(7, 7)) + >>> ax.plot(x, huber(delta, x), label="Huber", ls="dashed") + >>> ax.plot(x, huber_grad(delta, x), label="Huber Gradient", ls="dashdot") + >>> ax.plot(x, pseudo_huber(delta, x), label="Pseudo-Huber", ls="dotted") + >>> ax.plot(x, pseudo_huber_grad(delta, x), label="Pseudo-Huber Gradient", + ... ls="solid") + >>> ax.legend(loc="upper center") + >>> plt.show() + """) + +add_newdoc("radian", + """ + radian(d, m, s, out=None) + + Convert from degrees to radians. + + Returns the angle given in (d)egrees, (m)inutes, and (s)econds in + radians. + + Parameters + ---------- + d : array_like + Degrees, can be real-valued. + m : array_like + Minutes, can be real-valued. + s : array_like + Seconds, can be real-valued. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the inputs in radians. + + Examples + -------- + >>> import scipy.special as sc + + There are many ways to specify an angle. + + >>> sc.radian(90, 0, 0) + 1.5707963267948966 + >>> sc.radian(0, 60 * 90, 0) + 1.5707963267948966 + >>> sc.radian(0, 0, 60**2 * 90) + 1.5707963267948966 + + The inputs can be real-valued. + + >>> sc.radian(1.5, 0, 0) + 0.02617993877991494 + >>> sc.radian(1, 30, 0) + 0.02617993877991494 + + """) + +add_newdoc("rel_entr", + r""" + rel_entr(x, y, out=None) + + Elementwise function for computing relative entropy. + + .. math:: + + \mathrm{rel\_entr}(x, y) = + \begin{cases} + x \log(x / y) & x > 0, y > 0 \\ + 0 & x = 0, y \ge 0 \\ + \infty & \text{otherwise} + \end{cases} + + Parameters + ---------- + x, y : array_like + Input arrays + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Relative entropy of the inputs + + See Also + -------- + entr, kl_div, scipy.stats.entropy + + Notes + ----- + .. versionadded:: 0.15.0 + + This function is jointly convex in x and y. + + The origin of this function is in convex programming; see + [1]_. Given two discrete probability distributions :math:`p_1, + \ldots, p_n` and :math:`q_1, \ldots, q_n`, the definition of relative + entropy in the context of *information theory* is + + .. math:: + + \sum_{i = 1}^n \mathrm{rel\_entr}(p_i, q_i). + + To compute the latter quantity, use `scipy.stats.entropy`. + + See [2]_ for details. + + References + ---------- + .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*. + Cambridge University Press, 2004. + :doi:`https://doi.org/10.1017/CBO9780511804441` + .. [2] Kullback-Leibler divergence, + https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence + + """) + +add_newdoc("round", + """ + round(x, out=None) + + Round to the nearest integer. 
+ + Returns the nearest integer to `x`. If `x` ends in 0.5 exactly, + the nearest even integer is chosen. + + Parameters + ---------- + x : array_like + Real valued input. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + The nearest integers to the elements of `x`. The result is of + floating type, not integer type. + + Examples + -------- + >>> import scipy.special as sc + + It rounds to even. + + >>> sc.round([0.5, 1.5]) + array([0., 2.]) + + """) + +add_newdoc("shichi", + r""" + shichi(x, out=None) + + Hyperbolic sine and cosine integrals. + + The hyperbolic sine integral is + + .. math:: + + \int_0^x \frac{\sinh{t}}{t}dt + + and the hyperbolic cosine integral is + + .. math:: + + \gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt + + where :math:`\gamma` is Euler's constant and :math:`\log` is the + principal branch of the logarithm [1]_. + + Parameters + ---------- + x : array_like + Real or complex points at which to compute the hyperbolic sine + and cosine integrals. + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + si : scalar or ndarray + Hyperbolic sine integral at ``x`` + ci : scalar or ndarray + Hyperbolic cosine integral at ``x`` + + See Also + -------- + sici : Sine and cosine integrals. + exp1 : Exponential integral E1. + expi : Exponential integral Ei. + + Notes + ----- + For real arguments with ``x < 0``, ``chi`` is the real part of the + hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x + + 0j)`` differ by a factor of ``1j*pi``. + + For real arguments the function is computed by calling Cephes' + [2]_ *shichi* routine. For complex arguments the algorithm is based + on Mpmath's [3]_ *shi* and *chi* routines. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + (See Section 5.2.) + .. [2] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [3] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point + arithmetic" (Version 0.19) http://mpmath.org/ + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import shichi, sici + + `shichi` accepts real or complex input: + + >>> shichi(0.5) + (0.5069967498196671, -0.05277684495649357) + >>> shichi(0.5 + 2.5j) + ((0.11772029666668238+1.831091777729851j), + (0.29912435887648825+1.7395351121166562j)) + + The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are + related to the sine and cosine integrals Si(z) and Ci(z) by + + * Shi(z) = -i*Si(i*z) + * Chi(z) = Ci(-i*z) + i*pi/2 + + >>> z = 0.25 + 5j + >>> shi, chi = shichi(z) + >>> shi, -1j*sici(1j*z)[0] # Should be the same. + ((-0.04834719325101729+1.5469354086921228j), + (-0.04834719325101729+1.5469354086921228j)) + >>> chi, sici(-1j*z)[1] + 1j*np.pi/2 # Should be the same. 
+ ((-0.19568708973868087+1.556276312103824j), + (-0.19568708973868087+1.556276312103824j)) + + Plot the functions evaluated on the real axis: + + >>> xp = np.geomspace(1e-8, 4.0, 250) + >>> x = np.concatenate((-xp[::-1], xp)) + >>> shi, chi = shichi(x) + + >>> fig, ax = plt.subplots() + >>> ax.plot(x, shi, label='Shi(x)') + >>> ax.plot(x, chi, '--', label='Chi(x)') + >>> ax.set_xlabel('x') + >>> ax.set_title('Hyperbolic Sine and Cosine Integrals') + >>> ax.legend(shadow=True, framealpha=1, loc='lower right') + >>> ax.grid(True) + >>> plt.show() + + """) + +add_newdoc("sici", + r""" + sici(x, out=None) + + Sine and cosine integrals. + + The sine integral is + + .. math:: + + \int_0^x \frac{\sin{t}}{t}dt + + and the cosine integral is + + .. math:: + + \gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt + + where :math:`\gamma` is Euler's constant and :math:`\log` is the + principal branch of the logarithm [1]_. + + Parameters + ---------- + x : array_like + Real or complex points at which to compute the sine and cosine + integrals. + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + si : scalar or ndarray + Sine integral at ``x`` + ci : scalar or ndarray + Cosine integral at ``x`` + + See Also + -------- + shichi : Hyperbolic sine and cosine integrals. + exp1 : Exponential integral E1. + expi : Exponential integral Ei. + + Notes + ----- + For real arguments with ``x < 0``, ``ci`` is the real part of the + cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)`` + differ by a factor of ``1j*pi``. + + For real arguments the function is computed by calling Cephes' + [2]_ *sici* routine. For complex arguments the algorithm is based + on Mpmath's [3]_ *si* and *ci* routines. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + (See Section 5.2.) + .. [2] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [3] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point + arithmetic" (Version 0.19) http://mpmath.org/ + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import sici, exp1 + + `sici` accepts real or complex input: + + >>> sici(2.5) + (1.7785201734438267, 0.2858711963653835) + >>> sici(2.5 + 3j) + ((4.505735874563953+0.06863305018999577j), + (0.0793644206906966-2.935510262937543j)) + + For z in the right half plane, the sine and cosine integrals are + related to the exponential integral E1 (implemented in SciPy as + `scipy.special.exp1`) by + + * Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2 + * Ci(z) = -(E1(i*z) + E1(-i*z))/2 + + See [1]_ (equations 5.2.21 and 5.2.23). 
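+
+    The sine integral approaches ``pi/2`` as its argument grows; a quick
+    numerical illustration (the tolerance here is a sketch, not a
+    documented accuracy guarantee):
+
+    >>> si_large, _ = sici(1e8)
+    >>> bool(np.isclose(si_large, np.pi/2))
+    True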
+
+    We can verify the relations to ``exp1`` stated above:
+
+    >>> z = 2 - 3j
+    >>> sici(z)
+    ((4.54751388956229-1.3991965806460565j),
+    (1.408292501520851+2.9836177420296055j))
+
+    >>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2  # Same as sine integral
+    (4.54751388956229-1.3991965806460565j)
+
+    >>> -(exp1(1j*z) + exp1(-1j*z))/2  # Same as cosine integral
+    (1.408292501520851+2.9836177420296055j)
+
+    Plot the functions evaluated on the real axis; the dotted horizontal
+    lines are at pi/2 and -pi/2:
+
+    >>> x = np.linspace(-16, 16, 150)
+    >>> si, ci = sici(x)
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, si, label='Si(x)')
+    >>> ax.plot(x, ci, '--', label='Ci(x)')
+    >>> ax.legend(shadow=True, framealpha=1, loc='upper left')
+    >>> ax.set_xlabel('x')
+    >>> ax.set_title('Sine and Cosine Integrals')
+    >>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k')
+    >>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k')
+    >>> ax.grid(True)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("sindg",
+    """
+    sindg(x, out=None)
+
+    Sine of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Sine at the input.
+
+    See Also
+    --------
+    cosdg, tandg, cotdg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using sine directly.
+
+    >>> x = 180 * np.arange(3)
+    >>> sc.sindg(x)
+    array([ 0., -0.,  0.])
+    >>> np.sin(x * np.pi / 180)
+    array([ 0.0000000e+00,  1.2246468e-16, -2.4492936e-16])
+
+    """)
+
+add_newdoc("smirnov",
+    r"""
+    smirnov(n, d, out=None)
+
+    Kolmogorov-Smirnov complementary cumulative distribution function
+
+    Returns the exact Kolmogorov-Smirnov complementary cumulative
+    distribution function (also known as the survival function) of Dn+
+    (or Dn-) for a one-sided test of equality between an empirical and a
+    theoretical distribution. It is equal to the probability that the
+    maximum difference between a theoretical distribution and an empirical
+    one based on `n` samples is greater than d.
+
+    Parameters
+    ----------
+    n : int
+        Number of samples
+    d : float array_like
+        Deviation between the Empirical CDF (ECDF) and the target CDF.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
+
+    See Also
+    --------
+    smirnovi : The Inverse Survival Function for the distribution
+    scipy.stats.ksone : Provides the functionality as a continuous distribution
+    kolmogorov, kolmogi : Functions for the two-sided distribution
+
+    Notes
+    -----
+    `smirnov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.ksone` distribution.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import smirnov
+    >>> from scipy.stats import norm
+
+    Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a
+    sample of size 5.
+
+    >>> smirnov(5, [0, 0.5, 1.0])
+    array([ 1.   ,  0.056,  0.   ])
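+
+    As the See Also section notes, the same distribution is available as
+    `scipy.stats.ksone`. Assuming the ``ksone.sf(d, n)`` parametrization
+    (a sketch; verify against your SciPy version), the two should agree:
+
+    >>> from scipy.stats import ksone
+    >>> bool(np.isclose(smirnov(5, 0.5), ksone.sf(0.5, 5)))
+    True
+
+    Compare a sample of size 5 against N(0, 1), the standard normal
+    distribution with mean 0 and standard deviation 1.
+
+    `x` is the sample.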
+
+    >>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82])
+
+    >>> target = norm(0, 1)
+    >>> cdfs = target.cdf(x)
+    >>> cdfs
+    array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ])
+
+    Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn).
+
+    >>> n = len(x)
+    >>> ecdfs = np.arange(n+1, dtype=float)/n
+    >>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n],
+    ...                        ecdfs[1:] - cdfs])
+    >>> with np.printoptions(precision=3):
+    ...    print(cols)
+    [[-1.392  0.2    0.082  0.082  0.118]
+     [-0.135  0.4    0.446  0.246 -0.046]
+     [ 0.114  0.6    0.545  0.145  0.055]
+     [ 0.19   0.8    0.575 -0.025  0.225]
+     [ 1.82   1.     0.966  0.166  0.034]]
+    >>> gaps = cols[:, -2:]
+    >>> Dnpm = np.max(gaps, axis=0)
+    >>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}')
+    Dn-=0.246306, Dn+=0.224655
+    >>> probs = smirnov(n, Dnpm)
+    >>> print(f'For a sample of size {n} drawn from N(0, 1):',
+    ...       f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}',
+    ...       f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}',
+    ...       sep='\n')
+    For a sample of size 5 drawn from N(0, 1):
+     Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711
+     Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245
+
+    Plot the empirical CDF and the standard normal CDF.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.step(np.concatenate(([-2.5], x, [2.5])),
+    ...          np.concatenate((ecdfs, [1])),
+    ...          where='post', label='Empirical CDF')
+    >>> xx = np.linspace(-2.5, 2.5, 100)
+    >>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)')
+
+    Add vertical lines marking Dn+ and Dn-.
+
+    >>> iminus, iplus = np.argmax(gaps, axis=0)
+    >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r',
+    ...            alpha=0.5, lw=4)
+    >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m',
+    ...            alpha=0.5, lw=4)
+
+    >>> plt.grid(True)
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.show()
+    """)
+
+add_newdoc("smirnovi",
+    """
+    smirnovi(n, p, out=None)
+
+    Inverse to `smirnov`
+
+    Returns `d` such that ``smirnov(n, d) == p``, the critical value
+    corresponding to `p`.
+
+    Parameters
+    ----------
+    n : int
+        Number of samples
+    p : float array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value(s) of smirnovi(n, p), the critical values.
+
+    See Also
+    --------
+    smirnov : The Survival Function (SF) for the distribution
+    scipy.stats.ksone : Provides the functionality as a continuous distribution
+    kolmogorov, kolmogi : Functions for the two-sided distribution
+    scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n
+
+    Notes
+    -----
+    `smirnov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.ksone` distribution.
+
+    Examples
+    --------
+    >>> from scipy.special import smirnovi, smirnov
+
+    >>> n = 24
+    >>> deviations = [0.1, 0.2, 0.3]
+
+    Use `smirnov` to compute the complementary CDF of the Smirnov
+    distribution for the given number of samples and deviations.
+
+    >>> p = smirnov(n, deviations)
+    >>> p
+    array([0.58105083, 0.12826832, 0.01032231])
+
+    The inverse function ``smirnovi(n, p)`` returns ``deviations``.
+
+    >>> smirnovi(n, p)
+    array([0.1, 0.2, 0.3])
+
+    """)
+
+add_newdoc("_smirnovc",
+    """
+    _smirnovc(n, d)
+    Internal function, do not use.
+ """) + +add_newdoc("_smirnovci", + """ + Internal function, do not use. + """) + +add_newdoc("_smirnovp", + """ + _smirnovp(n, p) + Internal function, do not use. + """) + +add_newdoc("spence", + r""" + spence(z, out=None) + + Spence's function, also known as the dilogarithm. + + It is defined to be + + .. math:: + \int_1^z \frac{\log(t)}{1 - t}dt + + for complex :math:`z`, where the contour of integration is taken + to avoid the branch cut of the logarithm. Spence's function is + analytic everywhere except the negative real axis where it has a + branch cut. + + Parameters + ---------- + z : array_like + Points at which to evaluate Spence's function + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Computed values of Spence's function + + Notes + ----- + There is a different convention which defines Spence's function by + the integral + + .. math:: + -\int_0^z \frac{\log(1 - t)}{t}dt; + + this is our ``spence(1 - z)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import spence + >>> import matplotlib.pyplot as plt + + The function is defined for complex inputs: + + >>> spence([1-1j, 1.5+2j, 3j, -10-5j]) + array([-0.20561676+0.91596559j, -0.86766909-1.39560134j, + -0.59422064-2.49129918j, -1.14044398+6.80075924j]) + + For complex inputs on the branch cut, which is the negative real axis, + the function returns the limit for ``z`` with positive imaginary part. + For example, in the following, note the sign change of the imaginary + part of the output for ``z = -2`` and ``z = -2 - 1e-8j``: + + >>> spence([-2 + 1e-8j, -2, -2 - 1e-8j]) + array([2.32018041-3.45139229j, 2.32018042-3.4513923j , + 2.32018041+3.45139229j]) + + The function returns ``nan`` for real inputs on the branch cut: + + >>> spence(-1.5) + nan + + Verify some particular values: ``spence(0) = pi**2/6``, + ``spence(1) = 0`` and ``spence(2) = -pi**2/12``. + + >>> spence([0, 1, 2]) + array([ 1.64493407, 0. , -0.82246703]) + >>> np.pi**2/6, -np.pi**2/12 + (1.6449340668482264, -0.8224670334241132) + + Verify the identity:: + + spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z) + + >>> z = 3 + 4j + >>> spence(z) + spence(1 - z) + (-2.6523186143876067+1.8853470951513935j) + >>> np.pi**2/6 - np.log(z)*np.log(1 - z) + (-2.652318614387606+1.885347095151394j) + + Plot the function for positive real input. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0, 6, 400) + >>> ax.plot(x, spence(x)) + >>> ax.grid() + >>> ax.set_xlabel('x') + >>> ax.set_title('spence(x)') + >>> plt.show() + """) + +add_newdoc( + "stdtr", + r""" + stdtr(df, t, out=None) + + Student t distribution cumulative distribution function + + Returns the integral: + + .. math:: + \frac{\Gamma((df+1)/2)}{\sqrt{\pi df} \Gamma(df/2)} + \int_{-\infty}^t (1+x^2/df)^{-(df+1)/2}\, dx + + Parameters + ---------- + df : array_like + Degrees of freedom + t : array_like + Upper bound of the integral + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Value of the Student t CDF at t + + See Also + -------- + stdtridf : inverse of stdtr with respect to `df` + stdtrit : inverse of stdtr with respect to `t` + scipy.stats.t : student t distribution + + Notes + ----- + The student t distribution is also available as `scipy.stats.t`. + Calling `stdtr` directly can improve performance compared to the + ``cdf`` method of `scipy.stats.t` (see last example below). 
+ + Examples + -------- + Calculate the function for ``df=3`` at ``t=1``. + + >>> import numpy as np + >>> from scipy.special import stdtr + >>> import matplotlib.pyplot as plt + >>> stdtr(3, 1) + 0.8044988905221148 + + Plot the function for three different degrees of freedom. + + >>> x = np.linspace(-10, 10, 1000) + >>> fig, ax = plt.subplots() + >>> parameters = [(1, "solid"), (3, "dashed"), (10, "dotted")] + >>> for (df, linestyle) in parameters: + ... ax.plot(x, stdtr(df, x), ls=linestyle, label=f"$df={df}$") + >>> ax.legend() + >>> ax.set_title("Student t distribution cumulative distribution function") + >>> plt.show() + + The function can be computed for several degrees of freedom at the same + time by providing a NumPy array or list for `df`: + + >>> stdtr([1, 2, 3], 1) + array([0.75 , 0.78867513, 0.80449889]) + + It is possible to calculate the function at several points for several + different degrees of freedom simultaneously by providing arrays for `df` + and `t` with shapes compatible for broadcasting. Compute `stdtr` at + 4 points for 3 degrees of freedom resulting in an array of shape 3x4. + + >>> dfs = np.array([[1], [2], [3]]) + >>> t = np.array([2, 4, 6, 8]) + >>> dfs.shape, t.shape + ((3, 1), (4,)) + + >>> stdtr(dfs, t) + array([[0.85241638, 0.92202087, 0.94743154, 0.96041658], + [0.90824829, 0.97140452, 0.98666426, 0.99236596], + [0.93033702, 0.98599577, 0.99536364, 0.99796171]]) + + The t distribution is also available as `scipy.stats.t`. Calling `stdtr` + directly can be much faster than calling the ``cdf`` method of + `scipy.stats.t`. To get the same results, one must use the following + parametrization: ``scipy.stats.t(df).cdf(x) = stdtr(df, x)``. + + >>> from scipy.stats import t + >>> df, x = 3, 1 + >>> stdtr_result = stdtr(df, x) # this can be faster than below + >>> stats_result = t(df).cdf(x) + >>> stats_result == stdtr_result # test that results are equal + True + """) + +add_newdoc("stdtridf", + """ + stdtridf(p, t, out=None) + + Inverse of `stdtr` vs df + + Returns the argument df such that stdtr(df, t) is equal to `p`. + + Parameters + ---------- + p : array_like + Probability + t : array_like + Upper bound of the integral + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + df : scalar or ndarray + Value of `df` such that ``stdtr(df, t) == p`` + + See Also + -------- + stdtr : Student t CDF + stdtrit : inverse of stdtr with respect to `t` + scipy.stats.t : Student t distribution + + Examples + -------- + Compute the student t cumulative distribution function for one + parameter set. + + >>> from scipy.special import stdtr, stdtridf + >>> df, x = 5, 2 + >>> cdf_value = stdtr(df, x) + >>> cdf_value + 0.9490302605850709 + + Verify that `stdtridf` recovers the original value for `df` given + the CDF value and `x`. + + >>> stdtridf(cdf_value, x) + 5.0 + """) + +add_newdoc("stdtrit", + """ + stdtrit(df, p, out=None) + + The `p`-th quantile of the student t distribution. + + This function is the inverse of the student t distribution cumulative + distribution function (CDF), returning `t` such that `stdtr(df, t) = p`. + + Returns the argument `t` such that stdtr(df, t) is equal to `p`. 
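+
+    Since the Student t distribution is symmetric about zero, its quantiles
+    satisfy ``stdtrit(df, p) == -stdtrit(df, 1 - p)``; the broadcasting table
+    further below shows the same symmetry. A short illustrative check (the
+    values ``df=3`` and ``p=0.2`` are arbitrary example inputs):
+
+    >>> import numpy as np
+    >>> from scipy.special import stdtrit
+    >>> # the p-th and (1-p)-th quantiles differ only in sign
+    >>> bool(np.isclose(stdtrit(3, 0.2), -stdtrit(3, 0.8)))
+    True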
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom
+    p : array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    t : scalar or ndarray
+        Value of `t` such that ``stdtr(df, t) == p``
+
+    See Also
+    --------
+    stdtr : Student t CDF
+    stdtridf : inverse of stdtr with respect to `df`
+    scipy.stats.t : Student t distribution
+
+    Notes
+    -----
+    The Student t distribution is also available as `scipy.stats.t`. Calling
+    `stdtrit` directly can improve performance compared to the ``ppf``
+    method of `scipy.stats.t` (see last example below).
+
+    Examples
+    --------
+    `stdtrit` represents the inverse of the Student t distribution CDF which
+    is available as `stdtr`. Here, we calculate the CDF for ``df`` at
+    ``x=1``. `stdtrit` then returns ``1`` up to floating point errors
+    given the same value for `df` and the computed CDF value.
+
+    >>> import numpy as np
+    >>> from scipy.special import stdtr, stdtrit
+    >>> import matplotlib.pyplot as plt
+    >>> df = 3
+    >>> x = 1
+    >>> cdf_value = stdtr(df, x)
+    >>> stdtrit(df, cdf_value)
+    0.9999999994418539
+
+    Plot the function for three different degrees of freedom.
+
+    >>> x = np.linspace(0, 1, 1000)
+    >>> parameters = [(1, "solid"), (2, "dashed"), (5, "dotted")]
+    >>> fig, ax = plt.subplots()
+    >>> for (df, linestyle) in parameters:
+    ...     ax.plot(x, stdtrit(df, x), ls=linestyle, label=f"$df={df}$")
+    >>> ax.legend()
+    >>> ax.set_ylim(-10, 10)
+    >>> ax.set_title("Student t distribution quantile function")
+    >>> plt.show()
+
+    The function can be computed for several degrees of freedom at the same
+    time by providing a NumPy array or list for `df`:
+
+    >>> stdtrit([1, 2, 3], 0.7)
+    array([0.72654253, 0.6172134 , 0.58438973])
+
+    It is possible to calculate the function at several points for several
+    different degrees of freedom simultaneously by providing arrays for `df`
+    and `p` with shapes compatible for broadcasting. Compute `stdtrit` at
+    4 points for 3 degrees of freedom resulting in an array of shape 3x4.
+
+    >>> dfs = np.array([[1], [2], [3]])
+    >>> p = np.array([0.2, 0.4, 0.7, 0.8])
+    >>> dfs.shape, p.shape
+    ((3, 1), (4,))
+
+    >>> stdtrit(dfs, p)
+    array([[-1.37638192, -0.3249197 ,  0.72654253,  1.37638192],
+           [-1.06066017, -0.28867513,  0.6172134 ,  1.06066017],
+           [-0.97847231, -0.27667066,  0.58438973,  0.97847231]])
+
+    The t distribution is also available as `scipy.stats.t`. Calling `stdtrit`
+    directly can be much faster than calling the ``ppf`` method of
+    `scipy.stats.t`. To get the same results, one must use the following
+    parametrization: ``scipy.stats.t(df).ppf(x) = stdtrit(df, x)``.
+
+    >>> from scipy.stats import t
+    >>> df, x = 3, 0.5
+    >>> stdtrit_result = stdtrit(df, x)  # this can be faster than below
+    >>> stats_result = t(df).ppf(x)
+    >>> stats_result == stdtrit_result  # test that results are equal
+    True
+    """)
+
+add_newdoc("struve",
+    r"""
+    struve(v, x, out=None)
+
+    Struve function.
+
+    Return the value of the Struve function of order `v` at `x`. The Struve
+    function is defined as,
+
+    .. math::
+        H_v(x) = (x/2)^{v + 1} \sum_{n=0}^\infty
+        \frac{(-1)^n (x/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
+
+    where :math:`\Gamma` is the gamma function.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of the Struve function (float).
+    x : array_like
+        Argument of the Struve function (float; must be positive unless `v` is
+        an integer).
+ out : ndarray, optional + Optional output array for the function results + + Returns + ------- + H : scalar or ndarray + Value of the Struve function of order `v` at `x`. + + See Also + -------- + modstruve: Modified Struve function + + Notes + ----- + Three methods discussed in [1]_ are used to evaluate the Struve function: + + - power series + - expansion in Bessel functions (if :math:`|z| < |v| + 20`) + - asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`) + + Rounding errors are estimated based on the largest terms in the sums, and + the result associated with the smallest error is returned. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/11 + + Examples + -------- + Calculate the Struve function of order 1 at 2. + + >>> import numpy as np + >>> from scipy.special import struve + >>> import matplotlib.pyplot as plt + >>> struve(1, 2.) + 0.6467637282835622 + + Calculate the Struve function at 2 for orders 1, 2 and 3 by providing + a list for the order parameter `v`. + + >>> struve([1, 2, 3], 2.) + array([0.64676373, 0.28031806, 0.08363767]) + + Calculate the Struve function of order 1 for several points by providing + an array for `x`. + + >>> points = np.array([2., 5., 8.]) + >>> struve(1, points) + array([0.64676373, 0.80781195, 0.48811605]) + + Compute the Struve function for several orders at several points by + providing arrays for `v` and `z`. The arrays have to be broadcastable + to the correct shapes. + + >>> orders = np.array([[1], [2], [3]]) + >>> points.shape, orders.shape + ((3,), (3, 1)) + + >>> struve(orders, points) + array([[0.64676373, 0.80781195, 0.48811605], + [0.28031806, 1.56937455, 1.51769363], + [0.08363767, 1.50872065, 2.98697513]]) + + Plot the Struve functions of order 0 to 3 from -10 to 10. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> for i in range(4): + ... ax.plot(x, struve(i, x), label=f'$H_{i!r}$') + >>> ax.legend(ncol=2) + >>> ax.set_xlim(-10, 10) + >>> ax.set_title(r"Struve functions $H_{\nu}$") + >>> plt.show() + """) + +add_newdoc("tandg", + """ + tandg(x, out=None) + + Tangent of angle `x` given in degrees. + + Parameters + ---------- + x : array_like + Angle, given in degrees. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Tangent at the input. + + See Also + -------- + sindg, cosdg, cotdg + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using tangent directly. + + >>> x = 180 * np.arange(3) + >>> sc.tandg(x) + array([0., 0., 0.]) + >>> np.tan(x * np.pi / 180) + array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16]) + + """) + +add_newdoc( + "tklmbda", + r""" + tklmbda(x, lmbda, out=None) + + Cumulative distribution function of the Tukey lambda distribution. + + Parameters + ---------- + x, lmbda : array_like + Parameters + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cdf : scalar or ndarray + Value of the Tukey lambda CDF + + See Also + -------- + scipy.stats.tukeylambda : Tukey lambda distribution + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import tklmbda, expit + + Compute the cumulative distribution function (CDF) of the Tukey lambda + distribution at several ``x`` values for `lmbda` = -1.5. + + >>> x = np.linspace(-2, 2, 9) + >>> x + array([-2. , -1.5, -1. , -0.5, 0. , 0.5, 1. , 1.5, 2. 
])
+    >>> tklmbda(x, -1.5)
+    array([0.34688734, 0.3786554 , 0.41528805, 0.45629737, 0.5       ,
+           0.54370263, 0.58471195, 0.6213446 , 0.65311266])
+
+    When `lmbda` is 0, the function is the logistic sigmoid function,
+    which is implemented in `scipy.special` as `expit`.
+
+    >>> tklmbda(x, 0)
+    array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5       ,
+           0.62245933, 0.73105858, 0.81757448, 0.88079708])
+    >>> expit(x)
+    array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5       ,
+           0.62245933, 0.73105858, 0.81757448, 0.88079708])
+
+    When `lmbda` is 1, the Tukey lambda distribution is uniform on the
+    interval [-1, 1], so the CDF increases linearly.
+
+    >>> t = np.linspace(-1, 1, 9)
+    >>> tklmbda(t, 1)
+    array([0.   , 0.125, 0.25 , 0.375, 0.5  , 0.625, 0.75 , 0.875, 1.   ])
+
+    In the following, we generate plots for several values of `lmbda`.
+
+    The first figure shows graphs for `lmbda` <= 0.
+
+    >>> styles = ['-', '-.', '--', ':']
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-12, 12, 500)
+    >>> for k, lmbda in enumerate([-1.0, -0.5, 0.0]):
+    ...     y = tklmbda(x, lmbda)
+    ...     ax.plot(x, y, styles[k], label=rf'$\lambda$ = {lmbda:-4.1f}')
+
+    >>> ax.set_title(r'tklmbda(x, $\lambda$)')
+    >>> ax.set_xlabel('x')
+    >>> ax.legend(framealpha=1, shadow=True)
+    >>> ax.grid(True)
+
+    The second figure shows graphs for `lmbda` > 0. The dots in the
+    graphs show the bounds of the support of the distribution.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-4.2, 4.2, 500)
+    >>> lmbdas = [0.25, 0.5, 1.0, 1.5]
+    >>> for k, lmbda in enumerate(lmbdas):
+    ...     y = tklmbda(x, lmbda)
+    ...     ax.plot(x, y, styles[k], label=fr'$\lambda$ = {lmbda}')
+
+    >>> ax.set_prop_cycle(None)
+    >>> for lmbda in lmbdas:
+    ...     ax.plot([-1/lmbda, 1/lmbda], [0, 1], '.', ms=8)
+
+    >>> ax.set_title(r'tklmbda(x, $\lambda$)')
+    >>> ax.set_xlabel('x')
+    >>> ax.legend(framealpha=1, shadow=True)
+    >>> ax.grid(True)
+
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    The CDF of the Tukey lambda distribution is also implemented as the
+    ``cdf`` method of `scipy.stats.tukeylambda`. In the following,
+    ``tukeylambda.cdf(x, -0.5)`` and ``tklmbda(x, -0.5)`` compute the
+    same values:
+
+    >>> from scipy.stats import tukeylambda
+    >>> x = np.linspace(-2, 2, 9)
+
+    >>> tukeylambda.cdf(x, -0.5)
+    array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5       ,
+           0.58671839, 0.66458323, 0.72906142, 0.78004843])
+
+    >>> tklmbda(x, -0.5)
+    array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5       ,
+           0.58671839, 0.66458323, 0.72906142, 0.78004843])
+
+    The implementation in ``tukeylambda`` also provides location and scale
+    parameters, and other methods such as ``pdf()`` (the probability
+    density function) and ``ppf()`` (the inverse of the CDF), so for
+    working with the Tukey lambda distribution, ``tukeylambda`` is more
+    generally useful. The primary advantage of ``tklmbda`` is that it is
+    significantly faster than ``tukeylambda.cdf``.
+    """)
+
+add_newdoc("wofz",
+    """
+    wofz(z, out=None)
+
+    Faddeeva function
+
+    Returns the value of the Faddeeva function for complex argument::
+
+        exp(-z**2) * erfc(-i*z)
+
+    Parameters
+    ----------
+    z : array_like
+        complex argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the Faddeeva function
+
+    See Also
+    --------
+    dawsn, erf, erfc, erfcx, erfi
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(-3, 3)
+    >>> z = special.wofz(x)
+
+    >>> plt.plot(x, z.real, label='wofz(x).real')
+    >>> plt.plot(x, z.imag, label='wofz(x).imag')
+    >>> plt.xlabel('$x$')
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.grid(alpha=0.25)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("xlogy",
+    """
+    xlogy(x, y, out=None)
+
+    Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Multiplier
+    y : array_like
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    z : scalar or ndarray
+        Computed x*log(y)
+
+    Notes
+    -----
+    The log function used in the computation is the natural log.
+
+    .. versionadded:: 0.13.0
+
+    Examples
+    --------
+    We can use this function to calculate the binary logistic loss, also
+    known as the binary cross entropy. This loss function is used for
+    binary classification problems and is defined as:
+
+    .. math::
+        L = 1/n * \\sum_{i=1}^n -(y_i*log(y\\_pred_i) + (1-y_i)*log(1-y\\_pred_i))
+
+    We can define the parameters `x` and `y` as y and y_pred respectively.
+    y is the array of the actual labels which over here can be either 0 or 1.
+    y_pred is the array of the predicted probabilities with respect to
+    the positive class (1).
+
+    >>> import numpy as np
+    >>> from scipy.special import xlogy
+    >>> y = np.array([0, 1, 0, 1, 1, 0])
+    >>> y_pred = np.array([0.3, 0.8, 0.4, 0.7, 0.9, 0.2])
+    >>> n = len(y)
+    >>> loss = -(xlogy(y, y_pred) + xlogy(1 - y, 1 - y_pred)).sum()
+    >>> loss /= n
+    >>> loss
+    0.29597052165495025
+
+    A lower loss is usually better as it indicates that the predictions are
+    similar to the actual labels. In this example, since our predicted
+    probabilities are close to the actual labels, we get an overall loss
+    that is reasonably low and appropriate.
+
+    """)
+
+add_newdoc("xlog1py",
+    """
+    xlog1py(x, y, out=None)
+
+    Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Multiplier
+    y : array_like
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    z : scalar or ndarray
+        Computed x*log1p(y)
+
+    Notes
+    -----
+
+    .. versionadded:: 0.13.0
+
+    Examples
+    --------
+    This example shows how the function can be used to calculate the log of
+    the probability mass function for a geometric discrete random variable.
+    The probability mass function of the geometric distribution is defined
+    as follows:
+
+    .. math:: f(k) = (1-p)^{k-1} p
+
+    where :math:`p` is the probability of a single success
+    and :math:`1-p` is the probability of a single failure
+    and :math:`k` is the number of trials to get the first success.
+
+    >>> import numpy as np
+    >>> from scipy.special import xlog1py
+    >>> p = 0.5
+    >>> k = 100
+    >>> _pmf = np.power(1 - p, k - 1) * p
+    >>> _pmf
+    7.888609052210118e-31
+
+    If we take k as a relatively large number, the value of the probability
+    mass function can become very low. In such cases taking the log of the
+    pmf would be more suitable, as the log function can change the values
+    to a scale that is more appropriate to work with.
+
+    >>> _log_pmf = xlog1py(k - 1, -p) + np.log(p)
+    >>> _log_pmf
+    -69.31471805599453
+
+    We can confirm that we get a value close to the original pmf value by
+    taking the exponential of the log pmf.
+
+    >>> _orig_pmf = np.exp(_log_pmf)
+    >>> np.isclose(_pmf, _orig_pmf)
+    True
+
+    """)
+
+add_newdoc("y0",
+    r"""
+    y0(x, out=None)
+
+    Bessel function of the second kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind of order 0 at `x`.
+
+    See Also
+    --------
+    j0: Bessel function of the first kind of order 0
+    yv: Bessel function of the second kind
+
+    Notes
+    -----
+    The domain is divided into the intervals [0, 5] and (5, infinity). In the
+    first interval a rational approximation :math:`R(x)` is employed to
+    compute,
+
+    .. math::
+
+        Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
+
+    where :math:`J_0` is the Bessel function of the first kind of order 0.
+
+    In the second interval, the Hankel asymptotic expansion is employed with
+    two rational functions of degree 6/6 and 7/7.
+
+    This function is a wrapper for the Cephes [1]_ routine `y0`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import y0
+    >>> y0(1.)
+    0.08825696421567697
+
+    Calculate at several points:
+
+    >>> import numpy as np
+    >>> y0(np.array([0.5, 2., 3.]))
+    array([-0.44451873,  0.51037567,  0.37685001])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = y0(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("y1",
+    """
+    y1(x, out=None)
+
+    Bessel function of the second kind of order 1.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind of order 1 at `x`.
+
+    See Also
+    --------
+    j1: Bessel function of the first kind of order 1
+    yn: Bessel function of the second kind
+    yv: Bessel function of the second kind
+
+    Notes
+    -----
+    The domain is divided into the intervals [0, 8] and (8, infinity). In the
+    first interval a 25 term Chebyshev expansion is used, and computing
+    :math:`J_1` (the Bessel function of the first kind) is required. In the
+    second, the asymptotic trigonometric representation is employed using two
+    rational functions of degree 5/5.
+
+    This function is a wrapper for the Cephes [1]_ routine `y1`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import y1
+    >>> y1(1.)
+    -0.7812128213002888
+
+    Calculate at several points:
+
+    >>> import numpy as np
+    >>> y1(np.array([0.5, 2., 3.]))
+    array([-1.47147239, -0.10703243,  0.32467442])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = y1(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("yn",
+    r"""
+    yn(n, x, out=None)
+
+    Bessel function of the second kind of integer order and real argument.
+
+    Parameters
+    ----------
+    n : array_like
+        Order (integer).
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function, :math:`Y_n(x)`.
+
+    See Also
+    --------
+    yv : For real order and real or complex argument.
+    y0: faster implementation of this function for order 0
+    y1: faster implementation of this function for order 1
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `yn`.
+
+    The function is evaluated by forward recurrence on `n`, starting with
+    values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
+    the routine for `y0` or `y1` is called directly.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> from scipy.special import yn
+    >>> yn(0, 1.)
+    0.08825696421567697
+
+    Evaluate the function at one point for different orders.
+
+    >>> yn(0, 1.), yn(1, 1.), yn(2, 1.)
+    (0.08825696421567697, -0.7812128213002888, -1.6506826068162546)
+
+    The evaluation for different orders can be carried out in one call by
+    providing a list or NumPy array as argument for the `n` parameter:
+
+    >>> yn([0, 1, 2], 1.)
+    array([ 0.08825696, -0.78121282, -1.65068261])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `x`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 3., 8.])
+    >>> yn(0, points)
+    array([-0.44451873,  0.37685001,  0.22352149])
+
+    If `x` is an array, the order parameter `n` must be broadcastable to
+    the correct shape if different orders shall be computed in one call.
+    To calculate the orders 0 and 1 for a 1D array:
+
+    >>> orders = np.array([[0], [1]])
+    >>> orders.shape
+    (2, 1)
+
+    >>> yn(orders, points)
+    array([[-0.44451873,  0.37685001,  0.22352149],
+           [-1.47147239,  0.32467442, -0.15806046]])
+
+    Plot the functions of order 0 to 3 from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, yn(i, x), label=f'$Y_{i!r}$')
+    >>> ax.set_ylim(-3, 1)
+    >>> ax.legend()
+    >>> plt.show()
+    """)
+
+add_newdoc("yv",
+    r"""
+    yv(v, z, out=None)
+
+    Bessel function of the second kind of real order and complex argument.
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind, :math:`Y_v(z)`.
+
+    See Also
+    --------
+    yve : :math:`Y_v` with leading exponential behavior stripped off.
+    y0: faster implementation of this function for order 0
+    y1: faster implementation of this function for order 1
+
+    Notes
+    -----
+    For positive `v` values, the computation is carried out using the
+    AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
+    Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
+
+    .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
+
+    For negative `v` values the formula,
+
+    .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
+
+    is used, where :math:`J_v(z)` is the Bessel function of the first kind,
+    computed using the AMOS routine `zbesj`. Note that the second term is
+    exactly zero for integer `v`; to improve accuracy the second term is
+    explicitly omitted for `v` values such that `v = floor(v)`.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> from scipy.special import yv
+    >>> yv(0, 1.)
+    0.088256964215677
+
+    Evaluate the function at one point for different orders.
+
+    >>> yv(0, 1.), yv(1, 1.), yv(1.5, 1.)
+    (0.088256964215677, -0.7812128213002889, -1.102495575160179)
+
+    The evaluation for different orders can be carried out in one call by
+    providing a list or NumPy array as argument for the `v` parameter:
+
+    >>> yv([0, 1, 1.5], 1.)
+    array([ 0.08825696, -0.78121282, -1.10249558])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 3., 8.])
+    >>> yv(0, points)
+    array([-0.44451873,  0.37685001,  0.22352149])
+
+    If `z` is an array, the order parameter `v` must be broadcastable to
+    the correct shape if different orders shall be computed in one call.
+    To calculate the orders 0 and 1 for a 1D array:
+
+    >>> orders = np.array([[0], [1]])
+    >>> orders.shape
+    (2, 1)
+
+    >>> yv(orders, points)
+    array([[-0.44451873,  0.37685001,  0.22352149],
+           [-1.47147239,  0.32467442, -0.15806046]])
+
+    Plot the functions of order 0 to 3 from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, yv(i, x), label=f'$Y_{i!r}$')
+    >>> ax.set_ylim(-3, 1)
+    >>> ax.legend()
+    >>> plt.show()
+
+    """)
+
+add_newdoc("yve",
+    r"""
+    yve(v, z, out=None)
+
+    Exponentially scaled Bessel function of the second kind of real order.
+
+    Returns the exponentially scaled Bessel function of the second
+    kind of real order `v` at complex `z`::
+
+        yve(v, z) = yv(v, z) * exp(-abs(z.imag))
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the exponentially scaled Bessel function.
+
+    See Also
+    --------
+    yv: Unscaled Bessel function of the second kind of real order.
+
+    Notes
+    -----
+    For positive `v` values, the computation is carried out using the
+    AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
+    Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
+
+    .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
+
+    For negative `v` values the formula,
+
+    .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
+
+    is used, where :math:`J_v(z)` is the Bessel function of the first kind,
+    computed using the AMOS routine `zbesj`. Note that the second term is
+    exactly zero for integer `v`; to improve accuracy the second term is
+    explicitly omitted for `v` values such that `v = floor(v)`.
+
+    Exponentially scaled Bessel functions are useful for large `z`:
+    for these, the unscaled Bessel functions can easily under- or overflow.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Compare the output of `yv` and `yve` for large complex arguments for `z`
+    by computing their values for order ``v=1`` at ``z=1000j``. We see that
+    `yv` returns nan but `yve` returns a finite number:
+
+    >>> import numpy as np
+    >>> from scipy.special import yv, yve
+    >>> v = 1
+    >>> z = 1000j
+    >>> yv(v, z), yve(v, z)
+    ((nan+nanj), (-0.012610930256928629+7.721967686709076e-19j))
+
+    For real arguments for `z`, `yve` returns the same as `yv` up to
+    floating point errors.
+
+    >>> v, z = 1, 1000
+    >>> yv(v, z), yve(v, z)
+    (-0.02478433129235178, -0.02478433129235179)
+
+    The function can be evaluated for several orders at the same time by
+    providing a list or NumPy array for `v`:
+
+    >>> yve([1, 2, 3], 1j)
+    array([-0.20791042+0.14096627j,  0.38053618-0.04993878j,
+            0.00815531-1.66311097j])
+
+    In the same way, the function can be evaluated at several points in one
+    call by providing a list or NumPy array for `z`:
+
+    >>> yve(1, np.array([1j, 2j, 3j]))
+    array([-0.20791042+0.14096627j, -0.21526929+0.01205044j,
+           -0.19682671+0.00127278j])
+
+    It is also possible to evaluate several orders at several points
+    at the same time by providing arrays for `v` and `z` with
+    broadcasting compatible shapes. Compute `yve` for two different orders
+    `v` and three points `z` resulting in a 2x3 array.
+
+    >>> v = np.array([[1], [2]])
+    >>> z = np.array([3j, 4j, 5j])
+    >>> v.shape, z.shape
+    ((2, 1), (3,))
+
+    >>> yve(v, z)
+    array([[-1.96826713e-01+1.27277544e-03j, -1.78750840e-01+1.45558819e-04j,
+            -1.63972267e-01+1.73494110e-05j],
+           [ 1.94960056e-03-1.11782545e-01j,  2.02902325e-04-1.17626501e-01j,
+             2.27727687e-05-1.17951906e-01j]])
+    """)
+
+add_newdoc("zetac",
+    """
+    zetac(x, out=None)
+
+    Riemann zeta function minus 1.
+
+    This function is defined as
+
+    .. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
+
+    where ``x > 1``. For ``x < 1`` the analytic continuation is
+    computed. For more information on the Riemann zeta function, see
+    [dlmf]_.
+
+    Parameters
+    ----------
+    x : array_like of float
+        Values at which to compute zeta(x) - 1 (must be real).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of zeta(x) - 1.
+
+    See Also
+    --------
+    zeta
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/25
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import zetac, zeta
+
+    Some special values:
+
+    >>> zetac(2), np.pi**2/6 - 1
+    (0.64493406684822641, 0.6449340668482264)
+
+    >>> zetac(-1), -1.0/12 - 1
+    (-1.0833333333333333, -1.0833333333333333)
+
+    Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
+
+    >>> zetac(60), zeta(60) - 1
+    (8.673617380119933e-19, 0.0)
+    """)
+
+add_newdoc("_riemann_zeta",
+    """
+    Internal function, use `zeta` instead.
+    """)
+
+add_newdoc("_struve_asymp_large_z",
+    """
+    _struve_asymp_large_z(v, z, is_h)
+
+    Internal function for testing `struve` & `modstruve`
+
+    Evaluates using asymptotic expansion
+
+    Returns
+    -------
+    v, err
+    """)
+
+add_newdoc("_struve_power_series",
+    """
+    _struve_power_series(v, z, is_h)
+
+    Internal function for testing `struve` & `modstruve`
+
+    Evaluates using power series
+
+    Returns
+    -------
+    v, err
+    """)
+
+add_newdoc("_struve_bessel_series",
+    """
+    _struve_bessel_series(v, z, is_h)
+
+    Internal function for testing `struve` & `modstruve`
+
+    Evaluates using Bessel function series
+
+    Returns
+    -------
+    v, err
+    """)
+
+add_newdoc("_spherical_jn",
+    """
+    Internal function, use `spherical_jn` instead.
+    """)
+
+add_newdoc("_spherical_jn_d",
+    """
+    Internal function, use `spherical_jn` instead.
+    """)
+
+add_newdoc("_spherical_yn",
+    """
+    Internal function, use `spherical_yn` instead.
+    """)
+
+add_newdoc("_spherical_yn_d",
+    """
+    Internal function, use `spherical_yn` instead.
+    """)
+
+add_newdoc("_spherical_in",
+    """
+    Internal function, use `spherical_in` instead.
+ """) + +add_newdoc("_spherical_in_d", + """ + Internal function, use `spherical_in` instead. + """) + +add_newdoc("_spherical_kn", + """ + Internal function, use `spherical_kn` instead. + """) + +add_newdoc("_spherical_kn_d", + """ + Internal function, use `spherical_kn` instead. + """) + +add_newdoc("owens_t", + """ + owens_t(h, a, out=None) + + Owen's T Function. + + The function T(h, a) gives the probability of the event + (X > h and 0 < Y < a * X) where X and Y are independent + standard normal random variables. + + Parameters + ---------- + h: array_like + Input value. + a: array_like + Input value. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + t: scalar or ndarray + Probability of the event (X > h and 0 < Y < a * X), + where X and Y are independent standard normal random variables. + + References + ---------- + .. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of + Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000. + + Examples + -------- + >>> from scipy import special + >>> a = 3.5 + >>> h = 0.78 + >>> special.owens_t(h, a) + 0.10877216734852274 + """) + +add_newdoc("_factorial", + """ + Internal function, do not use. + """) + +add_newdoc("ndtri_exp", + r""" + ndtri_exp(y, out=None) + + Inverse of `log_ndtr` vs x. Allows for greater precision than + `ndtri` composed with `numpy.exp` for very small values of y and for + y close to 0. + + Parameters + ---------- + y : array_like of float + Function argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Inverse of the log CDF of the standard normal distribution, evaluated + at y. + + See Also + -------- + log_ndtr : log of the standard normal cumulative distribution function + ndtr : standard normal cumulative distribution function + ndtri : standard normal percentile function + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + `ndtri_exp` agrees with the naive implementation when the latter does + not suffer from underflow. + + >>> sc.ndtri_exp(-1) + -0.33747496376420244 + >>> sc.ndtri(np.exp(-1)) + -0.33747496376420244 + + For extreme values of y, the naive approach fails + + >>> sc.ndtri(np.exp(-800)) + -inf + >>> sc.ndtri(np.exp(-1e-20)) + inf + + whereas `ndtri_exp` is still able to compute the result to high precision. + + >>> sc.ndtri_exp(-800) + -39.88469483825668 + >>> sc.ndtri_exp(-1e-20) + 9.262340089798409 + """) + + +add_newdoc("_stirling2_inexact", + r""" + Internal function, do not use. + """) + +add_newdoc( + "_beta_pdf", + r""" + _beta_pdf(x, a, b) + + Probability density function of beta distribution. + + Parameters + ---------- + x : array_like + Real-valued such that :math:`0 \leq x \leq 1`, + the upper limit of integration + a, b : array_like + Positive, real-valued parameters + + Returns + ------- + scalar or ndarray + + """) + +add_newdoc( + "_beta_ppf", + r""" + _beta_ppf(x, a, b) + + Percent point function of beta distribution. + + Parameters + ---------- + x : array_like + Real-valued such that :math:`0 \leq x \leq 1`, + the upper limit of integration + a, b : array_like + Positive, real-valued parameters + + Returns + ------- + scalar or ndarray + + """) + +add_newdoc( + "_invgauss_ppf", + """ + _invgauss_ppf(x, mu) + + Percent point function of inverse gaussian distribution. 
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    mu : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_invgauss_isf",
+    """
+    _invgauss_isf(x, mu, s)
+
+    Inverse survival function of inverse gaussian distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    mu : array_like
+        Positive, real-valued parameters
+    s : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncx2_pdf",
+    """
+    _ncx2_pdf(x, k, l)
+
+    Probability density function of Non-central chi-squared distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    k, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncx2_cdf",
+    """
+    _ncx2_cdf(x, k, l)
+
+    Cumulative distribution function of Non-central chi-squared distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    k, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncx2_ppf",
+    """
+    _ncx2_ppf(x, k, l)
+
+    Percent point function of Non-central chi-squared distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    k, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncx2_sf",
+    """
+    _ncx2_sf(x, k, l)
+
+    Survival function of Non-central chi-squared distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    k, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncx2_isf",
+    """
+    _ncx2_isf(x, k, l)
+
+    Inverse survival function of Non-central chi-squared distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    k, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_pdf",
+    """
+    _ncf_pdf(x, v1, v2, l)
+
+    Probability density function of noncentral F-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_cdf",
+    """
+    _ncf_cdf(x, v1, v2, l)
+
+    Cumulative distribution function of noncentral F-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_ppf",
+    """
+    _ncf_ppf(x, v1, v2, l)
+
+    Percent point function of noncentral F-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_sf",
+    """
+    _ncf_sf(x, v1, v2, l)
+
+    Survival function of noncentral F-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_isf",
+    """
+    _ncf_isf(x, v1, v2, l)
+
+    Inverse survival function of noncentral F-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Positive real-valued
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_mean",
+    """
+    _ncf_mean(v1, v2, l)
+
+    Mean of noncentral F-distribution.
+
+    Parameters
+    ----------
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_variance",
+    """
+    _ncf_variance(v1, v2, l)
+
+    Variance of noncentral F-distribution.
+
+    Parameters
+    ----------
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_skewness",
+    """
+    _ncf_skewness(v1, v2, l)
+
+    Skewness of noncentral F-distribution.
+
+    Parameters
+    ----------
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_ncf_kurtosis_excess",
+    """
+    _ncf_kurtosis_excess(v1, v2, l)
+
+    Kurtosis excess of noncentral F-distribution.
+
+    Parameters
+    ----------
+    v1, v2, l : array_like
+        Positive, real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_cdf",
+    """
+    _nct_cdf(x, v, l)
+
+    Cumulative distribution function of noncentral t-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_ppf",
+    """
+    _nct_ppf(x, v, l)
+
+    Percent point function of noncentral t-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_sf",
+    """
+    _nct_sf(x, v, l)
+
+    Survival function of noncentral t-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_isf",
+    """
+    _nct_isf(x, v, l)
+
+    Inverse survival function of noncentral t-distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_mean",
+    """
+    _nct_mean(v, l)
+
+    Mean of noncentral t-distribution.
+
+    Parameters
+    ----------
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_variance",
+    """
+    _nct_variance(v, l)
+
+    Variance of noncentral t-distribution.
+
+    Parameters
+    ----------
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_skewness",
+    """
+    _nct_skewness(v, l)
+
+    Skewness of noncentral t-distribution.
+
+    Parameters
+    ----------
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nct_kurtosis_excess",
+    """
+    _nct_kurtosis_excess(v, l)
+
+    Kurtosis excess of noncentral t-distribution.
+
+    Parameters
+    ----------
+    v : array_like
+        Positive, real-valued parameters
+    l : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_skewnorm_cdf",
+    """
+    _skewnorm_cdf(x, l, sc, sh)
+
+    Cumulative distribution function of skewnorm distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    l : array_like
+        Real-valued parameters
+    sc : array_like
+        Positive, real-valued parameters
+    sh : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_skewnorm_ppf",
+    """
+    _skewnorm_ppf(x, l, sc, sh)
+
+    Percent point function of skewnorm distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    l : array_like
+        Real-valued parameters
+    sc : array_like
+        Positive, real-valued parameters
+    sh : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_skewnorm_isf",
+    """
+    _skewnorm_isf(x, l, sc, sh)
+
+    Inverse survival function of skewnorm distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    l : array_like
+        Real-valued parameters
+    sc : array_like
+        Positive, real-valued parameters
+    sh : array_like
+        Real-valued parameters
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_binom_pmf",
+    """
+    _binom_pmf(x, n, p)
+
+    Probability mass function of binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    n : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_binom_cdf",
+    """
+    _binom_cdf(x, n, p)
+
+    Cumulative distribution function of binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    n : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_binom_ppf",
+    """
+    _binom_ppf(x, n, p)
+
+    Percent point function of binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    n : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_binom_sf",
+    """
+    _binom_sf(x, n, p)
+
+    Survival function of binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    n : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_binom_isf",
+    """
+    _binom_isf(x, n, p)
+
+    Inverse survival function of binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    n : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_pmf",
+    """
+    _nbinom_pmf(x, r, p)
+
+    Probability mass function of negative binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_cdf",
+    """
+    _nbinom_cdf(x, r, p)
+
+    Cumulative distribution function of negative binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_ppf",
+    """
+    _nbinom_ppf(x, r, p)
+
+    Percent point function of negative binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_sf",
+    """
+    _nbinom_sf(x, r, p)
+
+    Survival function of negative binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_isf",
+    """
+    _nbinom_isf(x, r, p)
+
+    Inverse survival function of negative binomial distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_mean",
+    """
+    _nbinom_mean(r, p)
+
+    Mean of negative binomial distribution.
+
+    Parameters
+    ----------
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_variance",
+    """
+    _nbinom_variance(r, p)
+
+    Variance of negative binomial distribution.
+
+    Parameters
+    ----------
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_skewness",
+    """
+    _nbinom_skewness(r, p)
+
+    Skewness of negative binomial distribution.
+
+    Parameters
+    ----------
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_nbinom_kurtosis_excess",
+    """
+    _nbinom_kurtosis_excess(r, p)
+
+    Kurtosis excess of negative binomial distribution.
+
+    Parameters
+    ----------
+    r : array_like
+        Positive, integer-valued parameter
+    p : array_like
+        Positive, real-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_hypergeom_pmf",
+    """
+    _hypergeom_pmf(x, r, N, M)
+
+    Probability mass function of hypergeometric distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r, N, M : array_like
+        Positive, integer-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_hypergeom_cdf",
+    """
+    _hypergeom_cdf(x, r, N, M)
+
+    Cumulative distribution function of hypergeometric distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r, N, M : array_like
+        Positive, integer-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+    """)
+
+add_newdoc(
+    "_hypergeom_sf",
+    """
+    _hypergeom_sf(x, r, N, M)
+
+    Survival function of hypergeometric distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Real-valued
+    r, N, M : array_like
+        Positive, integer-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+    """)
+
+add_newdoc(
+    "_hypergeom_mean",
+    """
+    _hypergeom_mean(r, N, M)
+
+    Mean of hypergeometric distribution.
+
+    Parameters
+    ----------
+    r, N, M : array_like
+        Positive, integer-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_hypergeom_variance",
+    """
+    _hypergeom_variance(r, N, M)
+
+    Variance of hypergeometric distribution.
+
+    Parameters
+    ----------
+    r, N, M : array_like
+        Positive, integer-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
+
+add_newdoc(
+    "_hypergeom_skewness",
+    """
+    _hypergeom_skewness(r, N, M)
+
+    Skewness of hypergeometric distribution.
+
+    Parameters
+    ----------
+    r, N, M : array_like
+        Positive, integer-valued parameter
+
+    Returns
+    -------
+    scalar or ndarray
+
+    """)
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_basic.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..d39649f4d259658a4ae5d45047ab4205103f0516
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_basic.py
@@ -0,0 +1,3451 @@
+#
+# Author: Travis Oliphant, 2002
+#
+
+import operator
+import numpy as np
+import math
+import warnings
+from collections import defaultdict
+from heapq import heapify, heappop
+from numpy import (pi, asarray, floor, isscalar, sqrt, where,
+                   sin, place, issubdtype, extract, inexact, nan, zeros, sinc)
+from . import _ufuncs
+from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
+                      psi, hankel1, hankel2, yv, kv, poch, binom,
+                      _stirling2_inexact)
+from ._gufuncs import (_lpn, _lpmn, _clpmn, _lqn, _lqmn, _rctj, _rcty,
+                       _sph_harm_all as _sph_harm_all_gufunc)
+from . import _specfun
+from ._comb import _comb_int
+
+
+__all__ = [
+    'ai_zeros',
+    'assoc_laguerre',
+    'bei_zeros',
+    'beip_zeros',
+    'ber_zeros',
+    'bernoulli',
+    'berp_zeros',
+    'bi_zeros',
+    'clpmn',
+    'comb',
+    'digamma',
+    'diric',
+    'erf_zeros',
+    'euler',
+    'factorial',
+    'factorial2',
+    'factorialk',
+    'fresnel_zeros',
+    'fresnelc_zeros',
+    'fresnels_zeros',
+    'h1vp',
+    'h2vp',
+    'ivp',
+    'jn_zeros',
+    'jnjnp_zeros',
+    'jnp_zeros',
+    'jnyn_zeros',
+    'jvp',
+    'kei_zeros',
+    'keip_zeros',
+    'kelvin_zeros',
+    'ker_zeros',
+    'kerp_zeros',
+    'kvp',
+    'lmbda',
+    'lpmn',
+    'lpn',
+    'lqmn',
+    'lqn',
+    'mathieu_even_coef',
+    'mathieu_odd_coef',
+    'obl_cv_seq',
+    'pbdn_seq',
+    'pbdv_seq',
+    'pbvv_seq',
+    'perm',
+    'polygamma',
+    'pro_cv_seq',
+    'riccati_jn',
+    'riccati_yn',
+    'sinc',
+    'stirling2',
+    'y0_zeros',
+    'y1_zeros',
+    'y1p_zeros',
+    'yn_zeros',
+    'ynp_zeros',
+    'yvp',
+    'zeta'
+]
+
+
+# mapping k to last n such that factorialk(n, k) < np.iinfo(np.int64).max
+_FACTORIALK_LIMITS_64BITS = {1: 20, 2: 33, 3: 44, 4: 54, 5: 65,
+                             6: 74, 7: 84, 8: 93, 9: 101}
+# mapping k to last n such that factorialk(n, k) < np.iinfo(np.int32).max
+_FACTORIALK_LIMITS_32BITS = {1: 12, 2: 19, 3: 25, 4: 31, 5: 37,
+                             6: 43, 7: 47, 8: 51, 9: 56}
+
+
+def _nonneg_int_or_fail(n, var_name, strict=True):
+    try:
+        if strict:
+            # Raises an exception if float
+            n = operator.index(n)
+        elif n == floor(n):
+            n = int(n)
+        else:
+            raise ValueError()
+        if n < 0:
+            raise ValueError()
+    except (ValueError, TypeError) as err:
+        raise err.__class__(f"{var_name} must be a non-negative integer") from err
+    return n
+
+
+def diric(x, n):
+    """Periodic sinc function, also called the Dirichlet function.
+
+    The Dirichlet function is defined as::
+
+        diric(x, n) = sin(x * n/2) / (n * sin(x / 2)),
+
+    where `n` is a positive integer.
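+
+    At arguments where ``sin(x / 2)`` vanishes (even multiples of ``pi``),
+    the quotient above is taken in the limit, which is either ``+1`` or
+    ``-1`` (this matches the special-case handling in the implementation
+    below). A quick illustrative check, using ``n = 3`` as an arbitrary
+    example order:
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> # the denominator is zero at x = 0 and x = 2*pi; the limit is used
+    >>> special.diric(np.array([0.0, 2 * np.pi]), 3)
+    array([1., 1.])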
+ + Parameters + ---------- + x : array_like + Input data + n : int + Integer defining the periodicity. + + Returns + ------- + diric : ndarray + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201) + >>> plt.figure(figsize=(8, 8)); + >>> for idx, n in enumerate([2, 3, 4, 9]): + ... plt.subplot(2, 2, idx+1) + ... plt.plot(x, special.diric(x, n)) + ... plt.title('diric, n={}'.format(n)) + >>> plt.show() + + The following example demonstrates that `diric` gives the magnitudes + (modulo the sign and scaling) of the Fourier coefficients of a + rectangular pulse. + + Suppress output of values that are effectively 0: + + >>> np.set_printoptions(suppress=True) + + Create a signal `x` of length `m` with `k` ones: + + >>> m = 8 + >>> k = 3 + >>> x = np.zeros(m) + >>> x[:k] = 1 + + Use the FFT to compute the Fourier transform of `x`, and + inspect the magnitudes of the coefficients: + + >>> np.abs(np.fft.fft(x)) + array([ 3. , 2.41421356, 1. , 0.41421356, 1. , + 0.41421356, 1. , 2.41421356]) + + Now find the same values (up to sign) using `diric`. We multiply + by `k` to account for the different scaling conventions of + `numpy.fft.fft` and `diric`: + + >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False) + >>> k * special.diric(theta, k) + array([ 3. , 2.41421356, 1. , -0.41421356, -1. , + -0.41421356, 1. , 2.41421356]) + """ + x, n = asarray(x), asarray(n) + n = asarray(n + (x-x)) + x = asarray(x + (n-n)) + if issubdtype(x.dtype, inexact): + ytype = x.dtype + else: + ytype = float + y = zeros(x.shape, ytype) + + # empirical minval for 32, 64 or 128 bit float computations + # where sin(x/2) < minval, result is fixed at +1 or -1 + if np.finfo(ytype).eps < 1e-18: + minval = 1e-11 + elif np.finfo(ytype).eps < 1e-15: + minval = 1e-7 + else: + minval = 1e-3 + + mask1 = (n <= 0) | (n != floor(n)) + place(y, mask1, nan) + + x = x / 2 + denom = sin(x) + mask2 = (1-mask1) & (abs(denom) < minval) + xsub = extract(mask2, x) + nsub = extract(mask2, n) + zsub = xsub / pi + place(y, mask2, pow(-1, np.round(zsub)*(nsub-1))) + + mask = (1-mask1) & (1-mask2) + xsub = extract(mask, x) + nsub = extract(mask, n) + dsub = extract(mask, denom) + place(y, mask, sin(nsub*xsub)/(nsub*dsub)) + return y + + +def jnjnp_zeros(nt): + """Compute zeros of integer-order Bessel functions Jn and Jn'. + + Results are arranged in order of the magnitudes of the zeros. + + Parameters + ---------- + nt : int + Number (<=1200) of zeros to compute + + Returns + ------- + zo[l-1] : ndarray + Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`. + n[l-1] : ndarray + Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. + m[l-1] : ndarray + Serial number of the zeros of Jn(x) or Jn'(x) associated + with lth zero. Of length `nt`. + t[l-1] : ndarray + 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of + length `nt`. + + See Also + -------- + jn_zeros, jnp_zeros : to get separated arrays of zeros. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. 
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
+        raise ValueError("Number must be integer <= 1200.")
+    nt = int(nt)
+    n, m, t, zo = _specfun.jdzo(nt)
+    return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
+
+
+def jnyn_zeros(n, nt):
+    """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
+
+    Returns 4 arrays of length `nt`, corresponding to the first `nt`
+    zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. The zeros
+    are returned in ascending order.
+
+    Parameters
+    ----------
+    n : int
+        Order of the Bessel functions
+    nt : int
+        Number (<=1200) of zeros to compute
+
+    Returns
+    -------
+    Jn : ndarray
+        First `nt` zeros of Jn
+    Jnp : ndarray
+        First `nt` zeros of Jn'
+    Yn : ndarray
+        First `nt` zeros of Yn
+    Ynp : ndarray
+        First `nt` zeros of Yn'
+
+    See Also
+    --------
+    jn_zeros, jnp_zeros, yn_zeros, ynp_zeros
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first three roots of :math:`J_1`, :math:`J_1'`,
+    :math:`Y_1` and :math:`Y_1'`.
+
+    >>> from scipy.special import jnyn_zeros
+    >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3)
+    >>> jn_roots, yn_roots
+    (array([ 3.83170597,  7.01558667, 10.17346814]),
+     array([2.19714133, 5.42968104, 8.59600587]))
+
+    Plot :math:`J_1`, :math:`J_1'`, :math:`Y_1`, :math:`Y_1'` and their roots.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import jnyn_zeros, jvp, jn, yvp, yn
+    >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3)
+    >>> fig, ax = plt.subplots()
+    >>> xmax = 11
+    >>> x = np.linspace(0, xmax)
+    >>> x[0] += 1e-15
+    >>> ax.plot(x, jn(1, x), label=r"$J_1$", c='r')
+    >>> ax.plot(x, jvp(1, x, 1), label=r"$J_1'$", c='b')
+    >>> ax.plot(x, yn(1, x), label=r"$Y_1$", c='y')
+    >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$", c='c')
+    >>> zeros = np.zeros((3, ))
+    >>> ax.scatter(jn_roots, zeros, s=30, c='r', zorder=5,
+    ...            label=r"$J_1$ roots")
+    >>> ax.scatter(jnp_roots, zeros, s=30, c='b', zorder=5,
+    ...            label=r"$J_1'$ roots")
+    >>> ax.scatter(yn_roots, zeros, s=30, c='y', zorder=5,
+    ...            label=r"$Y_1$ roots")
+    >>> ax.scatter(ynp_roots, zeros, s=30, c='c', zorder=5,
+    ...            label=r"$Y_1'$ roots")
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_ylim(-0.6, 0.6)
+    >>> ax.set_xlim(0, xmax)
+    >>> ax.legend(ncol=2, bbox_to_anchor=(1., 0.75))
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    if not (isscalar(nt) and isscalar(n)):
+        raise ValueError("Arguments must be scalars.")
+    if (floor(n) != n) or (floor(nt) != nt):
+        raise ValueError("Arguments must be integers.")
+    if (nt <= 0):
+        raise ValueError("nt must be a positive integer.")
+    return _specfun.jyzo(abs(n), nt)
+
+
+def jn_zeros(n, nt):
+    r"""Compute zeros of integer-order Bessel functions Jn.
+
+    Compute `nt` zeros of the Bessel functions :math:`J_n(x)` on the
+    interval :math:`(0, \infty)`. The zeros are returned in ascending
+    order. Note that this interval excludes the zero at :math:`x = 0`
+    that exists for :math:`n > 0`.
+
+    Parameters
+    ----------
+    n : int
+        Order of Bessel function
+    nt : int
+        Number of zeros to return
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Bessel function.
+ + See Also + -------- + jv: Real-order Bessel functions of the first kind + jnp_zeros: Zeros of :math:`Jn'` + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four positive roots of :math:`J_3`. + + >>> from scipy.special import jn_zeros + >>> jn_zeros(3, 4) + array([ 6.3801619 , 9.76102313, 13.01520072, 16.22346616]) + + Plot :math:`J_3` and its first four positive roots. Note + that the root located at 0 is not returned by `jn_zeros`. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import jn, jn_zeros + >>> j3_roots = jn_zeros(3, 4) + >>> xmax = 18 + >>> xmin = -1 + >>> x = np.linspace(xmin, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, jn(3, x), label=r'$J_3$') + >>> ax.scatter(j3_roots, np.zeros((4, )), s=30, c='r', + ... label=r"$J_3$_Zeros", zorder=5) + >>> ax.scatter(0, 0, s=30, c='k', + ... label=r"Root at 0", zorder=5) + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.set_xlim(xmin, xmax) + >>> plt.legend() + >>> plt.show() + """ + return jnyn_zeros(n, nt)[0] + + +def jnp_zeros(n, nt): + r"""Compute zeros of integer-order Bessel function derivatives Jn'. + + Compute `nt` zeros of the functions :math:`J_n'(x)` on the + interval :math:`(0, \infty)`. The zeros are returned in ascending + order. Note that this interval excludes the zero at :math:`x = 0` + that exists for :math:`n > 1`. + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + Returns + ------- + ndarray + First `nt` zeros of the Bessel function. + + See Also + -------- + jvp: Derivatives of integer-order Bessel functions of the first kind + jv: Float-order Bessel functions of the first kind + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four roots of :math:`J_2'`. + + >>> from scipy.special import jnp_zeros + >>> jnp_zeros(2, 4) + array([ 3.05423693, 6.70613319, 9.96946782, 13.17037086]) + + As `jnp_zeros` yields the roots of :math:`J_n'`, it can be used to + compute the locations of the peaks of :math:`J_n`. Plot + :math:`J_2`, :math:`J_2'` and the locations of the roots of :math:`J_2'`. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import jn, jnp_zeros, jvp + >>> j2_roots = jnp_zeros(2, 4) + >>> xmax = 15 + >>> x = np.linspace(0, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, jn(2, x), label=r'$J_2$') + >>> ax.plot(x, jvp(2, x, 1), label=r"$J_2'$") + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.scatter(j2_roots, np.zeros((4, )), s=30, c='r', + ... label=r"Roots of $J_2'$", zorder=5) + >>> ax.set_ylim(-0.4, 0.8) + >>> ax.set_xlim(0, xmax) + >>> plt.legend() + >>> plt.show() + """ + return jnyn_zeros(n, nt)[1] + + +def yn_zeros(n, nt): + r"""Compute zeros of integer-order Bessel function Yn(x). + + Compute `nt` zeros of the functions :math:`Y_n(x)` on the interval + :math:`(0, \infty)`. The zeros are returned in ascending order. + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + Returns + ------- + ndarray + First `nt` zeros of the Bessel function. 
+
+    See Also
+    --------
+    yn: Bessel function of the second kind for integer order
+    yv: Bessel function of the second kind for real order
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of :math:`Y_2`.
+
+    >>> from scipy.special import yn_zeros
+    >>> yn_zeros(2, 4)
+    array([ 3.38424177,  6.79380751, 10.02347798, 13.20998671])
+
+    Plot :math:`Y_2` and its first four roots.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import yn, yn_zeros
+    >>> xmin = 2
+    >>> xmax = 15
+    >>> x = np.linspace(xmin, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hlines(0, xmin, xmax, color='k')
+    >>> ax.plot(x, yn(2, x), label=r'$Y_2$')
+    >>> ax.scatter(yn_zeros(2, 4), np.zeros((4, )), s=30, c='r',
+    ...            label='Roots', zorder=5)
+    >>> ax.set_ylim(-0.4, 0.4)
+    >>> ax.set_xlim(xmin, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    return jnyn_zeros(n, nt)[2]
+
+
+def ynp_zeros(n, nt):
+    r"""Compute zeros of integer-order Bessel function derivatives Yn'(x).
+
+    Compute `nt` zeros of the functions :math:`Y_n'(x)` on the
+    interval :math:`(0, \infty)`. The zeros are returned in ascending
+    order.
+
+    Parameters
+    ----------
+    n : int
+        Order of Bessel function
+    nt : int
+        Number of zeros to return
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Bessel derivative function.
+
+    See Also
+    --------
+    yvp
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of :math:`Y_0'`, the first derivative of
+    the Bessel function of the second kind of order 0.
+
+    >>> from scipy.special import ynp_zeros
+    >>> ynp_zeros(0, 4)
+    array([ 2.19714133,  5.42968104,  8.59600587, 11.74915483])
+
+    Plot :math:`Y_0`, :math:`Y_0'` and confirm visually that the roots of
+    :math:`Y_0'` are located at local extrema of :math:`Y_0`.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import yn, ynp_zeros, yvp
+    >>> zeros = ynp_zeros(0, 4)
+    >>> xmax = 13
+    >>> x = np.linspace(0, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, yn(0, x), label=r'$Y_0$')
+    >>> ax.plot(x, yvp(0, x, 1), label=r"$Y_0'$")
+    >>> ax.scatter(zeros, np.zeros((4, )), s=30, c='r',
+    ...            label=r"Roots of $Y_0'$", zorder=5)
+    >>> for root in zeros:
+    ...     y0_extremum = yn(0, root)
+    ...     lower = min(0, y0_extremum)
+    ...     upper = max(0, y0_extremum)
+    ...     ax.vlines(root, lower, upper, color='r')
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_ylim(-0.6, 0.6)
+    >>> ax.set_xlim(0, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    return jnyn_zeros(n, nt)[3]
+
+
+def y0_zeros(nt, complex=False):
+    """Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
+
+    The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+ + Returns + ------- + z0n : ndarray + Location of nth zero of Y0(z) + y0pz0n : ndarray + Value of derivative Y0'(z0) for nth zero + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first 4 real roots and the derivatives at the roots of + :math:`Y_0`: + + >>> import numpy as np + >>> from scipy.special import y0_zeros + >>> zeros, grads = y0_zeros(4) + >>> with np.printoptions(precision=5): + ... print(f"Roots: {zeros}") + ... print(f"Gradients: {grads}") + Roots: [ 0.89358+0.j 3.95768+0.j 7.08605+0.j 10.22235+0.j] + Gradients: [-0.87942+0.j 0.40254+0.j -0.3001 +0.j 0.2497 +0.j] + + Plot the real part of :math:`Y_0` and the first four computed roots. + + >>> import matplotlib.pyplot as plt + >>> from scipy.special import y0 + >>> xmin = 0 + >>> xmax = 11 + >>> x = np.linspace(xmin, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.hlines(0, xmin, xmax, color='k') + >>> ax.plot(x, y0(x), label=r'$Y_0$') + >>> zeros, grads = y0_zeros(4) + >>> ax.scatter(zeros.real, np.zeros((4, )), s=30, c='r', + ... label=r'$Y_0$_zeros', zorder=5) + >>> ax.set_ylim(-0.5, 0.6) + >>> ax.set_xlim(xmin, xmax) + >>> plt.legend(ncol=2) + >>> plt.show() + + Compute the first 4 complex roots and the derivatives at the roots of + :math:`Y_0` by setting ``complex=True``: + + >>> y0_zeros(4, True) + (array([ -2.40301663+0.53988231j, -5.5198767 +0.54718001j, + -8.6536724 +0.54841207j, -11.79151203+0.54881912j]), + array([ 0.10074769-0.88196771j, -0.02924642+0.5871695j , + 0.01490806-0.46945875j, -0.00937368+0.40230454j])) + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("Arguments must be scalar positive integer.") + kf = 0 + kc = not complex + return _specfun.cyzo(nt, kf, kc) + + +def y1_zeros(nt, complex=False): + """Compute nt zeros of Bessel function Y1(z), and derivative at each zero. + + The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1. + + Parameters + ---------- + nt : int + Number of zeros to return + complex : bool, default False + Set to False to return only the real zeros; set to True to return only + the complex zeros with negative real part and positive imaginary part. + Note that the complex conjugates of the latter are also zeros of the + function, but are not returned by this routine. + + Returns + ------- + z1n : ndarray + Location of nth zero of Y1(z) + y1pz1n : ndarray + Value of derivative Y1'(z1) for nth zero + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first 4 real roots and the derivatives at the roots of + :math:`Y_1`: + + >>> import numpy as np + >>> from scipy.special import y1_zeros + >>> zeros, grads = y1_zeros(4) + >>> with np.printoptions(precision=5): + ... print(f"Roots: {zeros}") + ... print(f"Gradients: {grads}") + Roots: [ 2.19714+0.j 5.42968+0.j 8.59601+0.j 11.74915+0.j] + Gradients: [ 0.52079+0.j -0.34032+0.j 0.27146+0.j -0.23246+0.j] + + Extract the real parts: + + >>> realzeros = zeros.real + >>> realzeros + array([ 2.19714133, 5.42968104, 8.59600587, 11.74915483]) + + Plot :math:`Y_1` and the first four computed roots. 
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import y1
+    >>> xmin = 0
+    >>> xmax = 13
+    >>> x = np.linspace(xmin, xmax, 500)
+    >>> zeros, grads = y1_zeros(4)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hlines(0, xmin, xmax, color='k')
+    >>> ax.plot(x, y1(x), label=r'$Y_1$')
+    >>> ax.scatter(zeros.real, np.zeros((4, )), s=30, c='r',
+    ...            label=r'$Y_1$_zeros', zorder=5)
+    >>> ax.set_ylim(-0.5, 0.5)
+    >>> ax.set_xlim(xmin, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+
+    Compute the first 4 complex roots and the derivatives at the roots of
+    :math:`Y_1` by setting ``complex=True``:
+
+    >>> y1_zeros(4, True)
+    (array([ -0.50274327+0.78624371j,  -3.83353519+0.56235654j,
+             -7.01590368+0.55339305j, -10.17357383+0.55127339j]),
+     array([-0.45952768+1.31710194j,  0.04830191-0.69251288j,
+            -0.02012695+0.51864253j,  0.011614  -0.43203296j]))
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 1
+    kc = not complex
+    return _specfun.cyzo(nt, kf, kc)
+
+
+def y1p_zeros(nt, complex=False):
+    """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
+
+    The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+
+    Returns
+    -------
+    z1pn : ndarray
+        Location of nth zero of Y1'(z)
+    y1z1pn : ndarray
+        Value of Y1(z1) at the nth zero of Y1'(z)
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of :math:`Y_1'` and the values of
+    :math:`Y_1` at these roots.
+
+    >>> import numpy as np
+    >>> from scipy.special import y1p_zeros
+    >>> y1grad_roots, y1_values = y1p_zeros(4)
+    >>> with np.printoptions(precision=5):
+    ...     print(f"Y1' Roots: {y1grad_roots.real}")
+    ...     print(f"Y1 values: {y1_values.real}")
+    Y1' Roots: [ 3.68302  6.9415  10.1234  13.28576]
+    Y1 values: [ 0.41673 -0.30317  0.25091 -0.21897]
+
+    `y1p_zeros` can be used to calculate the extremal points of :math:`Y_1`
+    directly. Here we plot :math:`Y_1` and the first four extrema.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import y1, yvp
+    >>> y1_roots, y1_values_at_roots = y1p_zeros(4)
+    >>> real_roots = y1_roots.real
+    >>> xmax = 15
+    >>> x = np.linspace(0, xmax, 500)
+    >>> x[0] += 1e-15
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, y1(x), label=r'$Y_1$')
+    >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$")
+    >>> ax.scatter(real_roots, np.zeros((4, )), s=30, c='r',
+    ...            label=r"Roots of $Y_1'$", zorder=5)
+    >>> ax.scatter(real_roots, y1_values_at_roots.real, s=30, c='k',
+    ...            label=r"Extrema of $Y_1$", zorder=5)
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_ylim(-0.5, 0.5)
+    >>> ax.set_xlim(0, xmax)
+    >>> ax.legend(ncol=2, bbox_to_anchor=(1., 0.75))
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 2
+    kc = not complex
+    return _specfun.cyzo(nt, kf, kc)
+
+
+def _bessel_diff_formula(v, z, n, L, phase):
+    # from AMS55.
+    # L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
+    # L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
+    # For K, you can pull out the exp((v-k)*pi*i) into the caller
+    v = asarray(v)
+    p = 1.0
+    s = L(v-n, z)
+    for i in range(1, n+1):
+        p = phase * (p * (n-i+1)) / i  # p == phase**i * binom(n, i)
+        s += p*L(v-n + i*2, z)
+    return s / (2.**n)
+
+
+def jvp(v, z, n=1):
+    """Compute derivatives of Bessel functions of the first kind.
+
+    Compute the nth derivative of the Bessel function `Jv` with
+    respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like or float
+        Order of Bessel function
+    z : complex
+        Argument at which to evaluate the derivative; can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0 returns the Bessel function `jv` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of the Bessel function.
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Bessel function of the first kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import jvp
+    >>> jvp(0, 1, 0), jvp(0, 1, 1), jvp(0, 1, 2)
+    (0.7651976865579666, -0.44005058574493355, -0.3251471008130331)
+
+    Compute the first derivative of the Bessel function of the first
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> jvp([0, 1, 2], 1, 1)
+    array([-0.44005059,  0.3251471 ,  0.21024362])
+
+    Compute the first derivative of the Bessel function of the first
+    kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0., 1.5, 3.])
+    >>> jvp(0, points, 1)
+    array([-0.        , -0.55793651, -0.33905896])
+
+    Plot the Bessel function of the first kind of order 1 and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-10, 10, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, jvp(1, x, 0), label=r"$J_1$")
+    >>> ax.plot(x, jvp(1, x, 1), label=r"$J_1'$")
+    >>> ax.plot(x, jvp(1, x, 2), label=r"$J_1''$")
+    >>> ax.plot(x, jvp(1, x, 3), label=r"$J_1'''$")
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return jv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, jv, -1)
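+
+
+# Illustrative doctest-style check (comments only, nothing is executed at
+# import): for n = 1 the difference formula above reduces to the classical
+# identity Jv'(z) = (J_{v-1}(z) - J_{v+1}(z)) / 2, so `jvp` should agree
+# with it:
+#
+#     >>> from scipy.special import jv, jvp
+#     >>> bool(abs(jvp(1, 1.5, 1) - (jv(0, 1.5) - jv(2, 1.5)) / 2) < 1e-12)
+#     True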
+
+
+def yvp(v, z, n=1):
+    """Compute derivatives of Bessel functions of the second kind.
+
+    Compute the nth derivative of the Bessel function `Yv` with
+    respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel function
+    z : complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative. For 0 returns the Bessel function `yv`
+
+    Returns
+    -------
+    scalar or ndarray
+        nth derivative of the Bessel function.
+
+    See Also
+    --------
+    yv : Bessel functions of the second kind
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Bessel function of the second kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import yvp
+    >>> yvp(0, 1, 0), yvp(0, 1, 1), yvp(0, 1, 2)
+    (0.088256964215677, 0.7812128213002889, -0.8694697855159659)
+
+    Compute the first derivative of the Bessel function of the second
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> yvp([0, 1, 2], 1, 1)
+    array([0.78121282, 0.86946979, 2.52015239])
+
+    Compute the first derivative of the Bessel function of the
+    second kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> yvp(0, points, 1)
+    array([ 1.47147239,  0.41230863, -0.32467442])
+
+    Plot the Bessel function of the second kind of order 1 and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> x[0] += 1e-15
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, yvp(1, x, 0), label=r"$Y_1$")
+    >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$")
+    >>> ax.plot(x, yvp(1, x, 2), label=r"$Y_1''$")
+    >>> ax.plot(x, yvp(1, x, 3), label=r"$Y_1'''$")
+    >>> ax.set_ylim(-10, 10)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return yv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, yv, -1)
+
+
+def kvp(v, z, n=1):
+    """Compute derivatives of real-order modified Bessel function Kv(z).
+
+    Kv(z) is the modified Bessel function of the second kind.
+    Derivative is calculated with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel function
+    z : array_like of complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative. For 0 returns the Bessel function `kv` itself.
+
+    Returns
+    -------
+    out : ndarray
+        nth derivative of the modified Bessel function.
+
+    See Also
+    --------
+    kv
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.29.5 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 6.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.29.E5
+
+    Examples
+    --------
+    Compute the modified Bessel function of the second kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import kvp
+    >>> kvp(0, 1, 0), kvp(0, 1, 1), kvp(0, 1, 2)
+    (0.42102443824070834, -0.6019072301972346, 1.0229316684379428)
+
+    Compute the first derivative of the modified Bessel function of the second
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> kvp([0, 1, 2], 1, 1)
+    array([-0.60190723, -1.02293167, -3.85158503])
+
+    Compute the first derivative of the modified Bessel function of the
+    second kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> kvp(0, points, 1)
+    array([-1.65644112, -0.2773878 , -0.04015643])
+
+    Plot the modified Bessel function of the second kind and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, kvp(1, x, 0), label=r"$K_1$")
+    >>> ax.plot(x, kvp(1, x, 1), label=r"$K_1'$")
+    >>> ax.plot(x, kvp(1, x, 2), label=r"$K_1''$")
+    >>> ax.plot(x, kvp(1, x, 3), label=r"$K_1'''$")
+    >>> ax.set_ylim(-2.5, 2.5)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return kv(v, z)
+    else:
+        return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
+
+
+def ivp(v, z, n=1):
+    """Compute derivatives of modified Bessel functions of the first kind.
+
+    Compute the nth derivative of the modified Bessel function `Iv`
+    with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like or float
+        Order of Bessel function
+    z : array_like
+        Argument at which to evaluate the derivative; can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0, returns the Bessel function `iv` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        nth derivative of the modified Bessel function.
+
+    See Also
+    --------
+    iv
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.29.5 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 6.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.29.E5
+
+    Examples
+    --------
+    Compute the modified Bessel function of the first kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import ivp
+    >>> ivp(0, 1, 0), ivp(0, 1, 1), ivp(0, 1, 2)
+    (1.2660658777520084, 0.565159103992485, 0.7009067737595233)
+
+    Compute the first derivative of the modified Bessel function of the first
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> ivp([0, 1, 2], 1, 1)
+    array([0.5651591 , 0.70090677, 0.29366376])
+
+    Compute the first derivative of the modified Bessel function of the
+    first kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0., 1.5, 3.])
+    >>> ivp(0, points, 1)
+    array([0.        , 0.98166643, 3.95337022])
+
+    Plot the modified Bessel function of the first kind of order 1 and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-5, 5, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, ivp(1, x, 0), label=r"$I_1$")
+    >>> ax.plot(x, ivp(1, x, 1), label=r"$I_1'$")
+    >>> ax.plot(x, ivp(1, x, 2), label=r"$I_1''$")
+    >>> ax.plot(x, ivp(1, x, 3), label=r"$I_1'''$")
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return iv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, iv, 1)
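+
+
+# Illustrative doctest-style check (comments only): DLMF 10.29.1 gives
+# I0'(z) = I1(z), so the n = 1 case of `ivp` should match `iv(1, z)`:
+#
+#     >>> from scipy.special import iv, ivp
+#     >>> bool(abs(ivp(0, 2.0, 1) - iv(1, 2.0)) < 1e-12)
+#     True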
+
+
+def h1vp(v, z, n=1):
+    """Compute derivatives of Hankel function H1v(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of Hankel function
+    z : array_like
+        Argument at which to evaluate the derivative. Can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0 returns the Hankel function `h1v` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of the Hankel function.
+
+    See Also
+    --------
+    hankel1
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Hankel function of the first kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import h1vp
+    >>> h1vp(0, 1, 0), h1vp(0, 1, 1), h1vp(0, 1, 2)
+    ((0.7651976865579664+0.088256964215677j),
+     (-0.44005058574493355+0.7812128213002889j),
+     (-0.3251471008130329-0.8694697855159659j))
+
+    Compute the first derivative of the Hankel function of the first kind
+    for several orders at 1 by providing an array for `v`.
+
+    >>> h1vp([0, 1, 2], 1, 1)
+    array([-0.44005059+0.78121282j,  0.3251471 +0.86946979j,
+            0.21024362+2.52015239j])
+
+    Compute the first derivative of the Hankel function of the first kind
+    of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> h1vp(0, points, 1)
+    array([-0.24226846+1.47147239j, -0.55793651+0.41230863j,
+           -0.33905896-0.32467442j])
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return hankel1(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, hankel1, -1)
+
+
+def h2vp(v, z, n=1):
+    """Compute derivatives of Hankel function H2v(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of Hankel function
+    z : array_like
+        Argument at which to evaluate the derivative. Can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0 returns the Hankel function `h2v` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of the Hankel function.
+
+    See Also
+    --------
+    hankel2
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Hankel function of the second kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import h2vp
+    >>> h2vp(0, 1, 0), h2vp(0, 1, 1), h2vp(0, 1, 2)
+    ((0.7651976865579664-0.088256964215677j),
+     (-0.44005058574493355-0.7812128213002889j),
+     (-0.3251471008130329+0.8694697855159659j))
+
+    Compute the first derivative of the Hankel function of the second kind
+    for several orders at 1 by providing an array for `v`.
+
+    >>> h2vp([0, 1, 2], 1, 1)
+    array([-0.44005059-0.78121282j,  0.3251471 -0.86946979j,
+            0.21024362-2.52015239j])
+
+    Compute the first derivative of the Hankel function of the second kind
+    of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> h2vp(0, points, 1)
+    array([-0.24226846-1.47147239j, -0.55793651-0.41230863j,
+           -0.33905896+0.32467442j])
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return hankel2(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, hankel2, -1)
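+
+
+# Illustrative doctest-style check (comments only): H1v(z) = Jv(z) + i*Yv(z),
+# so the derivatives returned by `h1vp`, `jvp` and `yvp` should be consistent:
+#
+#     >>> from scipy.special import h1vp, jvp, yvp
+#     >>> d = h1vp(1, 2.0, 1) - (jvp(1, 2.0, 1) + 1j*yvp(1, 2.0, 1))
+#     >>> bool(abs(d) < 1e-12)
+#     True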
+
+
+def riccati_jn(n, x):
+    r"""Compute Riccati-Bessel function of the first kind and its derivative.
+
+    The Riccati-Bessel function of the first kind is defined as :math:`x
+    j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
+    kind of order :math:`n`.
+
+    This function computes the value and first derivative of the
+    Riccati-Bessel function for all orders up to and including `n`.
+
+    Parameters
+    ----------
+    n : int
+        Maximum order of function to compute
+    x : float
+        Argument at which to evaluate
+
+    Returns
+    -------
+    jn : ndarray
+        Value of j0(x), ..., jn(x)
+    jnp : ndarray
+        First derivative j0'(x), ..., jn'(x)
+
+    Notes
+    -----
+    The computation is carried out via backward recurrence, using the
+    relation DLMF 10.51.1 [2]_.
+
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.51.E1
+
+    """
+    if not (isscalar(n) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n == 0):
+        n1 = 1
+    else:
+        n1 = n
+
+    jn = np.empty((n1 + 1,), dtype=np.float64)
+    jnp = np.empty_like(jn)
+
+    _rctj(x, out=(jn, jnp))
+    return jn[:(n+1)], jnp[:(n+1)]
+
+
+def riccati_yn(n, x):
+    """Compute Riccati-Bessel function of the second kind and its derivative.
+
+    The Riccati-Bessel function of the second kind is defined as :math:`x
+    y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
+    kind of order :math:`n`.
+
+    This function computes the value and first derivative of the function for
+    all orders up to and including `n`.
+
+    Parameters
+    ----------
+    n : int
+        Maximum order of function to compute
+    x : float
+        Argument at which to evaluate
+
+    Returns
+    -------
+    yn : ndarray
+        Value of y0(x), ..., yn(x)
+    ynp : ndarray
+        First derivative y0'(x), ..., yn'(x)
+
+    Notes
+    -----
+    The computation is carried out via ascending recurrence, using the
+    relation DLMF 10.51.1 [2]_.
+
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.51.E1
+
+    """
+    if not (isscalar(n) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n == 0):
+        n1 = 1
+    else:
+        n1 = n
+
+    yn = np.empty((n1 + 1,), dtype=np.float64)
+    ynp = np.empty_like(yn)
+    _rcty(x, out=(yn, ynp))
+
+    return yn[:(n+1)], ynp[:(n+1)]
+
+
+def erf_zeros(nt):
+    """Compute the first nt zeros of erf(z) in the first quadrant, ordered by absolute value.
+
+    Zeros in the other quadrants can be obtained by using the symmetries
+    erf(-z) = -erf(z) and erf(conj(z)) = conj(erf(z)).
+
+    Parameters
+    ----------
+    nt : int
+        The number of zeros to compute
+
+    Returns
+    -------
+    zeros : ndarray (complex)
+        Complex values at which erf(z) is zero
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> special.erf_zeros(1)
+    array([1.45061616+1.880943j])
+
+    Check that erf is (close to) zero for the value returned by erf_zeros
+
+    >>> special.erf(special.erf_zeros(1))
+    array([4.95159469e-14-1.16407394e-16j])
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.cerzo(nt)
+
+
+def fresnelc_zeros(nt):
+    """Compute nt complex zeros of cosine Fresnel integral C(z).
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute
+
+    Returns
+    -------
+    fresnelc_zeros: ndarray
+        Zeros of the cosine Fresnel integral
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.fcszo(1, nt)
+
+
+def fresnels_zeros(nt):
+    """Compute nt complex zeros of sine Fresnel integral S(z).
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute
+
+    Returns
+    -------
+    fresnels_zeros: ndarray
+        Zeros of the sine Fresnel integral
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.fcszo(2, nt)
+
+
+def fresnel_zeros(nt):
+    """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute
+
+    Returns
+    -------
+    zeros_sine: ndarray
+        Zeros of the sine Fresnel integral
+    zeros_cosine : ndarray
+        Zeros of the cosine Fresnel integral
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.fcszo(2, nt), _specfun.fcszo(1, nt)
+
+
+def assoc_laguerre(x, n, k=0.0):
+    """Compute the generalized (associated) Laguerre polynomial of degree n and order k.
+
+    The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
+    with weighting function ``exp(-x) * x**k`` with ``k > -1``.
+
+    Parameters
+    ----------
+    x : float or ndarray
+        Points at which to evaluate the Laguerre polynomial
+    n : int
+        Degree of the Laguerre polynomial
+    k : int
+        Order of the Laguerre polynomial
+
+    Returns
+    -------
+    assoc_laguerre: float or ndarray
+        Associated Laguerre polynomial values
+
+    Notes
+    -----
+    `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
+    reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
+
+    """
+    return _ufuncs.eval_genlaguerre(n, k, x)
+
+
+digamma = psi
+
+
+def polygamma(n, x):
+    r"""Polygamma functions.
+
+    Defined as :math:`\psi^{(n)}(x)` where :math:`\psi` is the
+    `digamma` function. See [dlmf]_ for details.
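+
+    For positive integer ``n``, the value is computed from the Hurwitz zeta
+    function `zeta` via the relation
+
+    .. math:: \psi^{(n)}(x) = (-1)^{n+1} n! \zeta(n+1, x).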
+
+    Parameters
+    ----------
+    n : array_like
+        The order of the derivative of the digamma function; must be
+        integral
+    x : array_like
+        Real valued input
+
+    Returns
+    -------
+    ndarray
+        Function results
+
+    See Also
+    --------
+    digamma
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/5.15
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> x = [2, 3, 25.5]
+    >>> special.polygamma(1, x)
+    array([ 0.64493407,  0.39493407,  0.03999467])
+    >>> special.polygamma(0, x) == special.psi(x)
+    array([ True,  True,  True], dtype=bool)
+
+    """
+    n, x = asarray(n), asarray(x)
+    fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
+    return where(n == 0, psi(x), fac2)
+
+
+def mathieu_even_coef(m, q):
+    r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
+
+    The Fourier series of the even solutions of the Mathieu differential
+    equation are of the form
+
+    .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
+
+    .. math:: \mathrm{ce}_{2n+1}(z, q) =
+              \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
+
+    This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
+    input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
+    m=2n+1.
+
+    Parameters
+    ----------
+    m : int
+        Order of Mathieu functions. Must be non-negative.
+    q : float (>=0)
+        Parameter of Mathieu functions. Must be non-negative.
+
+    Returns
+    -------
+    Ak : ndarray
+        Even or odd Fourier coefficients, corresponding to even or odd m.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/28.4#i
+
+    """
+    if not (isscalar(m) and isscalar(q)):
+        raise ValueError("m and q must be scalars.")
+    if (q < 0):
+        raise ValueError("q must be non-negative.")
+    if (m != floor(m)) or (m < 0):
+        raise ValueError("m must be an integer >=0.")
+
+    if (q <= 1):
+        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
+    else:
+        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
+    km = int(qm + 0.5*m)
+    if km > 251:
+        warnings.warn("Too many predicted coefficients.", RuntimeWarning, stacklevel=2)
+    kd = 1
+    m = int(floor(m))
+    if m % 2:
+        kd = 2
+
+    a = mathieu_a(m, q)
+    fc = _specfun.fcoef(kd, m, q, a)
+    return fc[:km]
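+
+
+# Illustrative doctest-style check (comments only): at z = 0 the cosine series
+# collapses to the plain sum of the coefficients, so the sum of
+# mathieu_even_coef(m, q) should agree with mathieu_cem(m, q, 0)[0], up to the
+# normalization used by the backend:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import mathieu_even_coef, mathieu_cem
+#     >>> bool(np.isclose(np.sum(mathieu_even_coef(0, 1)),
+#     ...                 mathieu_cem(0, 1, 0)[0]))
+#     True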
+
+
+def mathieu_odd_coef(m, q):
+    r"""Fourier coefficients for odd Mathieu and modified Mathieu functions.
+
+    The Fourier series of the odd solutions of the Mathieu differential
+    equation are of the form
+
+    .. math:: \mathrm{se}_{2n+1}(z, q) =
+              \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
+
+    .. math:: \mathrm{se}_{2n+2}(z, q) =
+              \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
+
+    This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
+    input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
+    input m=2n+1.
+
+    Parameters
+    ----------
+    m : int
+        Order of Mathieu functions. Must be non-negative.
+    q : float (>=0)
+        Parameter of Mathieu functions. Must be non-negative.
+
+    Returns
+    -------
+    Bk : ndarray
+        Even or odd Fourier coefficients, corresponding to even or odd m.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not (isscalar(m) and isscalar(q)):
+        raise ValueError("m and q must be scalars.")
+    if (q < 0):
+        raise ValueError("q must be non-negative.")
+    if (m != floor(m)) or (m <= 0):
+        raise ValueError("m must be an integer > 0")
+
+    if (q <= 1):
+        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
+    else:
+        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
+    km = int(qm + 0.5*m)
+    if km > 251:
+        warnings.warn("Too many predicted coefficients.", RuntimeWarning, stacklevel=2)
+    kd = 4
+    m = int(floor(m))
+    if m % 2:
+        kd = 3
+
+    b = mathieu_b(m, q)
+    fc = _specfun.fcoef(kd, m, q, b)
+    return fc[:km]
+
+
+def lpmn(m, n, z):
+    """Sequence of associated Legendre functions of the first kind.
+
+    Computes the associated Legendre function of the first kind of order m and
+    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
+    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
+    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
+
+    This function takes a real argument ``z``. For complex arguments ``z``
+    use clpmn instead.
+
+    Parameters
+    ----------
+    m : int
+        ``|m| <= n``; the order of the Legendre function.
+    n : int
+        where ``n >= 0``; the degree of the Legendre function. Often
+        called ``l`` (lower case L) in descriptions of the associated
+        Legendre function
+    z : array_like
+        Input value.
+
+    Returns
+    -------
+    Pmn_z : (m+1, n+1) array
+        Values for all orders 0..m and degrees 0..n
+    Pmn_d_z : (m+1, n+1) array
+        Derivatives for all orders 0..m and degrees 0..n
+
+    See Also
+    --------
+    clpmn: associated Legendre functions of the first kind for complex z
+
+    Notes
+    -----
+    In the interval (-1, 1), Ferrer's function of the first kind is
+    returned. The phase convention used for the intervals (1, inf)
+    and (-inf, -1) is such that the result is always real.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/14.3
+
+    """
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if not isscalar(m) or (abs(m) > n):
+        raise ValueError("m must be <= n.")
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    if np.iscomplexobj(z):
+        raise ValueError("Argument must be real. Use clpmn instead.")
+
+    m, n = int(m), int(n)  # Convert to int to maintain backwards compatibility.
+    if (m < 0):
+        m_signbit = True
+        m_abs = -m
+    else:
+        m_signbit = False
+        m_abs = m
+
+    z = np.asarray(z)
+    if (not np.issubdtype(z.dtype, np.inexact)):
+        z = z.astype(np.float64)
+
+    p = np.empty((m_abs + 1, n + 1) + z.shape, dtype=np.float64)
+    pd = np.empty_like(p)
+    if (z.ndim == 0):
+        _lpmn(z, m_signbit, out=(p, pd))
+    else:
+        # new axes must be last for the ufunc
+        _lpmn(z, m_signbit, out=(np.moveaxis(p, (0, 1), (-2, -1)),
+                                 np.moveaxis(pd, (0, 1), (-2, -1))))
+
+    return p, pd
+
+
+def clpmn(m, n, z, type=3):
+    """Associated Legendre function of the first kind for complex arguments.
+
+    Computes the associated Legendre function of the first kind of order m and
+    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
+    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
+    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
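+
+    For example, for order ``m = 0`` the values reduce to the Legendre
+    polynomials (here :math:`P_0(0.5) = 1`, :math:`P_1(0.5) = 0.5` and
+    :math:`P_2(0.5) = -0.125`):
+
+    >>> import numpy as np
+    >>> from scipy.special import clpmn
+    >>> p, pd = clpmn(0, 2, 0.5 + 0j)
+    >>> bool(np.allclose(p[0], [1.0, 0.5, -0.125]))
+    True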
+ + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : array_like, float or complex + Input value. + type : int, optional + takes values 2 or 3 + 2: cut on the real axis ``|x| > 1`` + 3: cut on the real axis ``-1 < x < 1`` (default) + + Returns + ------- + Pmn_z : (m+1, n+1) array + Values for all orders ``0..m`` and degrees ``0..n`` + Pmn_d_z : (m+1, n+1) array + Derivatives for all orders ``0..m`` and degrees ``0..n`` + + See Also + -------- + lpmn: associated Legendre functions of the first kind for real z + + Notes + ----- + By default, i.e. for ``type=3``, phase conventions are chosen according + to [1]_ such that the function is analytic. The cut lies on the interval + (-1, 1). Approaching the cut from above or below in general yields a phase + factor with respect to Ferrer's function of the first kind + (cf. `lpmn`). + + For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values + on the interval (-1, 1) in the complex plane yields Ferrer's function + of the first kind. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. [2] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/14.21 + + """ + if not isscalar(m) or (abs(m) > n): + raise ValueError("m must be <= n.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not (type == 2 or type == 3): + raise ValueError("type must be either 2 or 3.") + + m, n = int(m), int(n) # Convert to int to maintain backwards compatibility. + if (m < 0): + mp = -m + m_signbit = True + else: + mp = m + m_signbit = False + + z = np.asarray(z) + if (not np.issubdtype(z.dtype, np.inexact)): + z = z.astype(np.complex128) + + p = np.empty((mp + 1, n + 1) + z.shape, dtype=np.complex128) + pd = np.empty_like(p) + if (z.ndim == 0): + _clpmn(z, type, m_signbit, out = (p, pd)) + else: + _clpmn(z, type, m_signbit, out = (np.moveaxis(p, (0, 1), (-2, -1)), + np.moveaxis(pd, (0, 1), (-2, -1)))) # new axes must be last for the ufunc + + return p, pd + + +def lqmn(m, n, z): + """Sequence of associated Legendre functions of the second kind. + + Computes the associated Legendre function of the second kind of order m and + degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``. + Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and + ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. + + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : array_like, complex + Input value. + + Returns + ------- + Qmn_z : (m+1, n+1) array + Values for all orders 0..m and degrees 0..n + Qmn_d_z : (m+1, n+1) array + Derivatives for all orders 0..m and degrees 0..n + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(m) or (m < 0):
+        raise ValueError("m must be a non-negative integer.")
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+
+    m, n = int(m), int(n)  # Convert to int to maintain backwards compatibility.
+    # Ensure neither m nor n == 0
+    mm = max(1, m)
+    nn = max(1, n)
+
+    z = np.asarray(z)
+    if (not np.issubdtype(z.dtype, np.inexact)):
+        z = z.astype(np.float64)
+
+    if np.iscomplexobj(z):
+        q = np.empty((mm + 1, nn + 1) + z.shape, dtype=np.complex128)
+    else:
+        q = np.empty((mm + 1, nn + 1) + z.shape, dtype=np.float64)
+    qd = np.empty_like(q)
+    if (z.ndim == 0):
+        _lqmn(z, out=(q, qd))
+    else:
+        # new axes must be last for the ufunc
+        _lqmn(z, out=(np.moveaxis(q, (0, 1), (-2, -1)),
+                      np.moveaxis(qd, (0, 1), (-2, -1))))
+
+    return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
+
+
+def bernoulli(n):
+    """Bernoulli numbers B0..Bn (inclusive).
+
+    Parameters
+    ----------
+    n : int
+        Indicates the number of terms in the Bernoulli series to generate.
+
+    Returns
+    -------
+    ndarray
+        The Bernoulli numbers ``[B(0), B(1), ..., B(n)]``.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] "Bernoulli number", Wikipedia, https://en.wikipedia.org/wiki/Bernoulli_number
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import bernoulli, zeta
+    >>> bernoulli(4)
+    array([ 1.        , -0.5       ,  0.16666667,  0.        , -0.03333333])
+
+    The Wikipedia article ([2]_) points out the relationship between the
+    Bernoulli numbers and the zeta function, ``B_n^+ = -n * zeta(1 - n)``
+    for ``n > 0``:
+
+    >>> n = np.arange(1, 5)
+    >>> -n * zeta(1 - n)
+    array([ 0.5       ,  0.16666667, -0.        , -0.03333333])
+
+    Note that, in the notation used in the Wikipedia article,
+    `bernoulli` computes ``B_n^-`` (i.e. it uses the convention that
+    ``B_1`` is -1/2). The relation given above is for ``B_n^+``, so the
+    sign of 0.5 does not match the output of ``bernoulli(4)``.
+
+    """
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    n = int(n)
+    if (n < 2):
+        n1 = 2
+    else:
+        n1 = n
+    return _specfun.bernob(int(n1))[:(n+1)]
+
+
+def euler(n):
+    """Euler numbers E(0), E(1), ..., E(n).
+
+    The Euler numbers [1]_ are also known as the secant numbers.
+
+    Because ``euler(n)`` returns floating point values, it does not give
+    exact values for large `n`. The first inexact value is E(22).
+
+    Parameters
+    ----------
+    n : int
+        The highest index of the Euler number to be returned.
+
+    Returns
+    -------
+    ndarray
+        The Euler numbers [E(0), E(1), ..., E(n)].
+        The odd Euler numbers, which are all zero, are included.
+
+    References
+    ----------
+    .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences,
+           https://oeis.org/A122045
+    .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import euler
+    >>> euler(6)
+    array([  1.,   0.,  -1.,   0.,   5.,   0., -61.])
+
+    >>> euler(13).astype(np.int64)
+    array([      1,       0,      -1,       0,       5,       0,     -61,
+                 0,    1385,       0,  -50521,       0, 2702765,       0])
+
+    >>> euler(22)[-1]  # Exact value of E(22) is -69348874393137901.
+ -69348874393137976.0 + + """ + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + n = int(n) + if (n < 2): + n1 = 2 + else: + n1 = n + return _specfun.eulerb(n1)[:(n+1)] + + +def lpn(n, z): + """Legendre function of the first kind. + + Compute sequence of Legendre functions of the first kind (polynomials), + Pn(z) and derivatives for all degrees from 0 to n (inclusive). + + See also special.legendre for polynomial class. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + n = _nonneg_int_or_fail(n, 'n', strict=False) + + z = np.asarray(z) + if (not np.issubdtype(z.dtype, np.inexact)): + z = z.astype(np.float64) + + pn = np.empty((n + 1,) + z.shape, dtype=z.dtype) + pd = np.empty_like(pn) + if (z.ndim == 0): + _lpn(z, out = (pn, pd)) + else: + _lpn(z, out = (np.moveaxis(pn, 0, -1), + np.moveaxis(pd, 0, -1))) # new axes must be last for the ufunc + + return pn, pd + + +def lqn(n, z): + """Legendre function of the second kind. + + Compute sequence of Legendre functions of the second kind, Qn(z) and + derivatives for all degrees from 0 to n (inclusive). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n < 1): + n1 = 1 + else: + n1 = n + + z = np.asarray(z) + if (not np.issubdtype(z.dtype, np.inexact)): + z = z.astype(float) + + if np.iscomplexobj(z): + qn = np.empty((n1 + 1,) + z.shape, dtype=np.complex128) + else: + qn = np.empty((n1 + 1,) + z.shape, dtype=np.float64) + qd = np.empty_like(qn) + if (z.ndim == 0): + _lqn(z, out = (qn, qd)) + else: + _lqn(z, out = (np.moveaxis(qn, 0, -1), + np.moveaxis(qd, 0, -1))) # new axes must be last for the ufunc + + return qn[:(n+1)], qd[:(n+1)] + + +def ai_zeros(nt): + """ + Compute `nt` zeros and values of the Airy function Ai and its derivative. + + Computes the first `nt` zeros, `a`, of the Airy function Ai(x); + first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x); + the corresponding values Ai(a'); + and the corresponding values Ai'(a). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + a : ndarray + First `nt` zeros of Ai(x) + ap : ndarray + First `nt` zeros of Ai'(x) + ai : ndarray + Values of Ai(x) evaluated at first `nt` zeros of Ai'(x) + aip : ndarray + Values of Ai'(x) evaluated at first `nt` zeros of Ai(x) + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + >>> from scipy import special + >>> a, ap, ai, aip = special.ai_zeros(3) + >>> a + array([-2.33810741, -4.08794944, -5.52055983]) + >>> ap + array([-1.01879297, -3.24819758, -4.82009921]) + >>> ai + array([ 0.53565666, -0.41901548, 0.38040647]) + >>> aip + array([ 0.70121082, -0.80311137, 0.86520403]) + + """ + kf = 1 + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be a positive integer scalar.") + return _specfun.airyzo(nt, kf) + + +def bi_zeros(nt): + """ + Compute `nt` zeros and values of the Airy function Bi and its derivative. 
+
+    Computes the first `nt` zeros, b, of the Airy function Bi(x);
+    first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
+    the corresponding values Bi(b');
+    and the corresponding values Bi'(b).
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute
+
+    Returns
+    -------
+    b : ndarray
+        First `nt` zeros of Bi(x)
+    bp : ndarray
+        First `nt` zeros of Bi'(x)
+    bi : ndarray
+        Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
+    bip : ndarray
+        Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> b, bp, bi, bip = special.bi_zeros(3)
+    >>> b
+    array([-1.17371322, -3.2710933 , -4.83073784])
+    >>> bp
+    array([-2.29443968, -4.07315509, -5.51239573])
+    >>> bi
+    array([-0.45494438,  0.39652284, -0.36796916])
+    >>> bip
+    array([ 0.60195789, -0.76031014,  0.83699101])
+
+    """
+    kf = 2
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be a positive integer scalar.")
+    return _specfun.airyzo(nt, kf)
+
+
+def lmbda(v, x):
+    r"""Jahnke-Emden Lambda function, Lambdav(x).
+
+    This function is defined as [2]_,
+
+    .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
+
+    where :math:`\Gamma` is the gamma function and :math:`J_v` is the
+    Bessel function of the first kind.
+
+    Parameters
+    ----------
+    v : float
+        Order of the Lambda function
+    x : float
+        Value at which to evaluate the function and derivatives
+
+    Returns
+    -------
+    vl : ndarray
+        Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+    dl : ndarray
+        Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
+           Curves" (4th ed.), Dover, 1945
+    """
+    if not (isscalar(v) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    if (v < 0):
+        raise ValueError("argument must be non-negative.")
+    n = int(v)
+    v0 = v - n
+    if (n < 1):
+        n1 = 1
+    else:
+        n1 = n
+    v1 = n1 + v0
+    if (v != floor(v)):
+        vm, vl, dl = _specfun.lamv(v1, x)
+    else:
+        vm, vl, dl = _specfun.lamn(v1, x)
+    return vl[:(n+1)], dl[:(n+1)]
+
+
+def pbdv_seq(v, x):
+    """Parabolic cylinder functions Dv(x) and derivatives.
+
+    Parameters
+    ----------
+    v : float
+        Order of the parabolic cylinder function
+    x : float
+        Value at which to evaluate the function and derivatives
+
+    Returns
+    -------
+    dv : ndarray
+        Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+    dp : ndarray
+        Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 13.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not (isscalar(v) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = int(v)
+    v0 = v-n
+    if (n < 1):
+        n1 = 1
+    else:
+        n1 = n
+    v1 = n1 + v0
+    dv, dp, pdf, pdd = _specfun.pbdv(v1, x)
+    return dv[:n1+1], dp[:n1+1]
+
+
+def pbvv_seq(v, x):
+    """Parabolic cylinder functions Vv(x) and derivatives.
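+
+    The returned sequence steps through the orders
+    ``v - int(v), 1 + v - int(v), ..., v``; e.g. for ``v = 2.5`` the orders
+    0.5, 1.5 and 2.5 are covered. A quick shape check (illustrative sketch,
+    assuming SciPy is installed):
+
+    >>> from scipy.special import pbvv_seq
+    >>> dv, dp = pbvv_seq(2.5, 1.0)
+    >>> dv.shape
+    (3,)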
+ + Parameters + ---------- + v : float + Order of the parabolic cylinder function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dp : ndarray + Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = int(v) + v0 = v-n + if (n <= 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + dv, dp, pdf, pdd = _specfun.pbvv(v1, x) + return dv[:n1+1], dp[:n1+1] + + +def pbdn_seq(n, z): + """Parabolic cylinder functions Dn(z) and derivatives. + + Parameters + ---------- + n : int + Order of the parabolic cylinder function + z : complex + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of D_i(z), for i=0, ..., i=n. + dp : ndarray + Derivatives D_i'(z), for i=0, ..., i=n. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + if (floor(n) != n): + raise ValueError("n must be an integer.") + if (abs(n) <= 1): + n1 = 1 + else: + n1 = n + cpb, cpd = _specfun.cpbdn(n1, z) + return cpb[:n1+1], cpd[:n1+1] + + +def ber_zeros(nt): + """Compute nt zeros of the Kelvin function ber. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + ber + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 1) + + +def bei_zeros(nt): + """Compute nt zeros of the Kelvin function bei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + bei + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 2) + + +def ker_zeros(nt): + """Compute nt zeros of the Kelvin function ker. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + ker + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 3) + + +def kei_zeros(nt): + """Compute nt zeros of the Kelvin function kei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + kei + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 4) + + +def berp_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function ber. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + ber, berp + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 5) + + +def beip_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function bei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + bei, beip + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 6) + + +def kerp_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function ker. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + ker, kerp + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 7) + + +def keip_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function kei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + kei, keip + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 8) + + +def kelvin_zeros(nt): + """Compute nt zeros of all Kelvin functions. + + Returned in a length-8 tuple of arrays of length nt. The tuple contains + the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return (_specfun.klvnzo(nt, 1), + _specfun.klvnzo(nt, 2), + _specfun.klvnzo(nt, 3), + _specfun.klvnzo(nt, 4), + _specfun.klvnzo(nt, 5), + _specfun.klvnzo(nt, 6), + _specfun.klvnzo(nt, 7), + _specfun.klvnzo(nt, 8)) + + +def pro_cv_seq(m, n, c): + """Characteristic values for prolate spheroidal wave functions. + + Compute a sequence of characteristic values for the prolate + spheroidal wave functions for mode m and n'=m..n and spheroidal + parameter c. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(n) and isscalar(c)): + raise ValueError("Arguments must be scalars.") + if (n != floor(n)) or (m != floor(m)): + raise ValueError("Modes must be integers.") + if (n-m > 199): + raise ValueError("Difference between n and m is too large.") + maxL = n-m+1 + return _specfun.segv(m, n, c, 1)[1][:maxL] + + +def obl_cv_seq(m, n, c): + """Characteristic values for oblate spheroidal wave functions. + + Compute a sequence of characteristic values for the oblate + spheroidal wave functions for mode m and n'=m..n and spheroidal + parameter c. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(n) and isscalar(c)): + raise ValueError("Arguments must be scalars.") + if (n != floor(n)) or (m != floor(m)): + raise ValueError("Modes must be integers.") + if (n-m > 199): + raise ValueError("Difference between n and m is too large.") + maxL = n-m+1 + return _specfun.segv(m, n, c, -1)[1][:maxL] + + +def comb(N, k, *, exact=False, repetition=False): + """The number of combinations of N things taken k at a time. + + This is often expressed as "N choose k". + + Parameters + ---------- + N : int, ndarray + Number of things. + k : int, ndarray + Number of elements taken. + exact : bool, optional + For integers, if `exact` is False, then floating point precision is + used, otherwise the result is computed exactly. + + .. deprecated:: 1.14.0 + ``exact=True`` is deprecated for non-integer `N` and `k` and will raise an + error in SciPy 1.16.0 + repetition : bool, optional + If `repetition` is True, then the number of combinations with + repetition is computed. + + Returns + ------- + val : int, float, ndarray + The total number of combinations. + + See Also + -------- + binom : Binomial coefficient considered as a function of two real + variables. 
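+    perm : The number of permutations of N things taken k at a time.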
+ + Notes + ----- + - Array arguments accepted only for exact=False case. + - If N < 0, or k < 0, then 0 is returned. + - If k > N and repetition=False, then 0 is returned. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import comb + >>> k = np.array([3, 4]) + >>> n = np.array([10, 10]) + >>> comb(n, k, exact=False) + array([ 120., 210.]) + >>> comb(10, 3, exact=True) + 120 + >>> comb(10, 3, exact=True, repetition=True) + 220 + + """ + if repetition: + return comb(N + k - 1, k, exact=exact) + if exact: + if int(N) == N and int(k) == k: + # _comb_int casts inputs to integers, which is safe & intended here + return _comb_int(N, k) + # otherwise, we disregard `exact=True`; it makes no sense for + # non-integral arguments + msg = ("`exact=True` is deprecated for non-integer `N` and `k` and will raise " + "an error in SciPy 1.16.0") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return comb(N, k) + else: + k, N = asarray(k), asarray(N) + cond = (k <= N) & (N >= 0) & (k >= 0) + vals = binom(N, k) + if isinstance(vals, np.ndarray): + vals[~cond] = 0 + elif not cond: + vals = np.float64(0) + return vals + + +def perm(N, k, exact=False): + """Permutations of N things taken k at a time, i.e., k-permutations of N. + + It's also known as "partial permutations". + + Parameters + ---------- + N : int, ndarray + Number of things. + k : int, ndarray + Number of elements taken. + exact : bool, optional + If ``True``, calculate the answer exactly using long integer arithmetic (`N` + and `k` must be scalar integers). If ``False``, a floating point approximation + is calculated (more rapidly) using `poch`. Default is ``False``. + + Returns + ------- + val : int, ndarray + The number of k-permutations of N. + + Notes + ----- + - Array arguments accepted only for exact=False case. + - If k > N, N < 0, or k < 0, then a 0 is returned. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import perm + >>> k = np.array([3, 4]) + >>> n = np.array([10, 10]) + >>> perm(n, k) + array([ 720., 5040.]) + >>> perm(10, 3, exact=True) + 720 + + """ + if exact: + N = np.squeeze(N)[()] # for backward compatibility (accepted size 1 arrays) + k = np.squeeze(k)[()] + if not (isscalar(N) and isscalar(k)): + raise ValueError("`N` and `k` must scalar integers be with `exact=True`.") + + floor_N, floor_k = int(N), int(k) + non_integral = not (floor_N == N and floor_k == k) + if (k > N) or (N < 0) or (k < 0): + if non_integral: + msg = ("Non-integer `N` and `k` with `exact=True` is deprecated and " + "will raise an error in SciPy 1.16.0.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return 0 + if non_integral: + raise ValueError("Non-integer `N` and `k` with `exact=True` is not " + "supported.") + val = 1 + for i in range(floor_N - floor_k + 1, floor_N + 1): + val *= i + return val + else: + k, N = asarray(k), asarray(N) + cond = (k <= N) & (N >= 0) & (k >= 0) + vals = poch(N - k + 1, k) + if isinstance(vals, np.ndarray): + vals[~cond] = 0 + elif not cond: + vals = np.float64(0) + return vals + + +# https://stackoverflow.com/a/16327037 +def _range_prod(lo, hi, k=1): + """ + Product of a range of numbers spaced k apart (from hi). + + For k=1, this returns the product of + lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi + = hi! / (lo-1)! + + For k>1, it correspond to taking only every k'th number when + counting down from hi - e.g. 18!!!! = _range_prod(1, 18, 4). 
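+    Here 18!!!! = 18 * 14 * 10 * 6 * 2.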
+ + Breaks into smaller products first for speed: + _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9)) + """ + if lo + k < hi: + mid = (hi + lo) // 2 + if k > 1: + # make sure mid is a multiple of k away from hi + mid = mid - ((mid - hi) % k) + return _range_prod(lo, mid, k) * _range_prod(mid + k, hi, k) + elif lo + k == hi: + return lo * hi + else: + return hi + + +def _factorialx_array_exact(n, k=1): + """ + Exact computation of factorial for an array. + + The factorials are computed in incremental fashion, by taking + the sorted unique values of n and multiplying the intervening + numbers between the different unique values. + + In other words, the factorial for the largest input is only + computed once, with each other result computed in the process. + + k > 1 corresponds to the multifactorial. + """ + un = np.unique(n) + # numpy changed nan-sorting behaviour with 1.21, see numpy/numpy#18070; + # to unify the behaviour, we remove the nan's here; the respective + # values will be set separately at the end + un = un[~np.isnan(un)] + + # Convert to object array if np.int64 can't handle size + if np.isnan(n).any(): + dt = float + elif k in _FACTORIALK_LIMITS_64BITS.keys(): + if un[-1] > _FACTORIALK_LIMITS_64BITS[k]: + # e.g. k=1: 21! > np.iinfo(np.int64).max + dt = object + elif un[-1] > _FACTORIALK_LIMITS_32BITS[k]: + # e.g. k=3: 26!!! > np.iinfo(np.int32).max + dt = np.int64 + else: + dt = np.dtype("long") + else: + # for k >= 10, we always use object + dt = object + + out = np.empty_like(n, dtype=dt) + + # Handle invalid/trivial values + un = un[un > 1] + out[n < 2] = 1 + out[n < 0] = 0 + + # Calculate products of each range of numbers + # we can only multiply incrementally if the values are k apart; + # therefore we partition `un` into "lanes", i.e. its residues modulo k + for lane in range(0, k): + ul = un[(un % k) == lane] if k > 1 else un + if ul.size: + # after np.unique, un resp. ul are sorted, ul[0] is the smallest; + # cast to python ints to avoid overflow with np.int-types + val = _range_prod(1, int(ul[0]), k=k) + out[n == ul[0]] = val + for i in range(len(ul) - 1): + # by the filtering above, we have ensured that prev & current + # are a multiple of k apart + prev = ul[i] + current = ul[i + 1] + # we already multiplied all factors until prev; continue + # building the full factorial from the following (`prev + 1`); + # use int() for the same reason as above + val *= _range_prod(int(prev + 1), int(current), k=k) + out[n == current] = val + + if np.isnan(n).any(): + out = out.astype(np.float64) + out[np.isnan(n)] = np.nan + return out + + +def _factorialx_array_approx(n, k): + """ + Calculate approximation to multifactorial for array n and integer k. + + Ensure we only call _factorialx_approx_core where necessary/required. + """ + result = zeros(n.shape) + # keep nans as nans + place(result, np.isnan(n), np.nan) + # only compute where n >= 0 (excludes nans), everything else is 0 + cond = (n >= 0) + n_to_compute = extract(cond, n) + place(result, cond, _factorialx_approx_core(n_to_compute, k=k)) + return result + + +def _factorialx_approx_core(n, k): + """ + Core approximation to multifactorial for array n and integer k. 
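+
+    For k > 1 this evaluates, per residue class r = n % k, the closed
+    form given in the `factorialk` docstring::
+
+        n!(k) = k ** ((n - r)/k) * gamma(n/k + 1) / gamma(r/k + 1) * max(r, 1)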
+ """ + if k == 1: + # shortcut for k=1 + result = gamma(n + 1) + if isinstance(n, np.ndarray): + # gamma does not maintain 0-dim arrays + result = np.array(result) + return result + + n_mod_k = n % k + # scalar case separately, unified handling would be inefficient for arrays; + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if not isinstance(n, np.ndarray): + return ( + np.power(k, (n - n_mod_k) / k) + * gamma(n / k + 1) / gamma(n_mod_k / k + 1) + * max(n_mod_k, 1) + ) + + # factor that's independent of the residue class (see factorialk docstring) + result = np.power(k, n / k) * gamma(n / k + 1) + # factor dependent on residue r (for `r=0` it's 1, so we skip `r=0` + # below and thus also avoid evaluating `max(r, 1)`) + def corr(k, r): return np.power(k, -r / k) / gamma(r / k + 1) * r + for r in np.unique(n_mod_k): + if r == 0: + continue + # cast to int because uint types break on `-r` + result[n_mod_k == r] *= corr(k, int(r)) + return result + + +def factorial(n, exact=False): + """ + The factorial of a number or array of numbers. + + The factorial of non-negative integer `n` is the product of all + positive integers less than or equal to `n`:: + + n! = n * (n - 1) * (n - 2) * ... * 1 + + Parameters + ---------- + n : int or array_like of ints + Input values. If ``n < 0``, the return value is 0. + exact : bool, optional + If True, calculate the answer exactly using long integer arithmetic. + If False, result is approximated in floating point rapidly using the + `gamma` function. + Default is False. + + Returns + ------- + nf : float or int or ndarray + Factorial of `n`, as integer or float depending on `exact`. + + Notes + ----- + For arrays with ``exact=True``, the factorial is computed only once, for + the largest input, with each other result computed in the process. + The output dtype is increased to ``int64`` or ``object`` if necessary. + + With ``exact=False`` the factorial is approximated using the gamma + function: + + .. math:: n! = \\Gamma(n+1) + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import factorial + >>> arr = np.array([3, 4, 5]) + >>> factorial(arr, exact=False) + array([ 6., 24., 120.]) + >>> factorial(arr, exact=True) + array([ 6, 24, 120]) + >>> factorial(5, exact=True) + 120 + + """ + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if np.ndim(n) == 0 and not isinstance(n, np.ndarray): + # scalar cases + if n is None or np.isnan(n): + return np.nan + elif not (np.issubdtype(type(n), np.integer) + or np.issubdtype(type(n), np.floating)): + raise ValueError( + f"Unsupported datatype for factorial: {type(n)}\n" + "Permitted data types are integers and floating point numbers" + ) + elif n < 0: + return 0 + elif exact and np.issubdtype(type(n), np.integer): + return math.factorial(n) + elif exact: + msg = ("Non-integer values of `n` together with `exact=True` are " + "deprecated. 
Either ensure integer `n` or use `exact=False`.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return _factorialx_approx_core(n, k=1) + + # arrays & array-likes + n = asarray(n) + if n.size == 0: + # return empty arrays unchanged + return n + if not (np.issubdtype(n.dtype, np.integer) + or np.issubdtype(n.dtype, np.floating)): + raise ValueError( + f"Unsupported datatype for factorial: {n.dtype}\n" + "Permitted data types are integers and floating point numbers" + ) + if exact and not np.issubdtype(n.dtype, np.integer): + msg = ("factorial with `exact=True` does not " + "support non-integral arrays") + raise ValueError(msg) + + if exact: + return _factorialx_array_exact(n, k=1) + return _factorialx_array_approx(n, k=1) + + +def factorial2(n, exact=False): + """Double factorial. + + This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5 + * 3 * 1``. It can be approximated numerically as:: + + n!! = 2 ** (n / 2) * gamma(n / 2 + 1) * sqrt(2 / pi) n odd + = 2 ** (n / 2) * gamma(n / 2 + 1) n even + = 2 ** (n / 2) * (n / 2)! n even + + Parameters + ---------- + n : int or array_like + Calculate ``n!!``. If ``n < 0``, the return value is 0. + exact : bool, optional + The result can be approximated rapidly using the gamma-formula + above (default). If `exact` is set to True, calculate the + answer exactly using integer arithmetic. + + Returns + ------- + nff : float or int + Double factorial of `n`, as an int or a float depending on + `exact`. + + Examples + -------- + >>> from scipy.special import factorial2 + >>> factorial2(7, exact=False) + array(105.00000000000001) + >>> factorial2(7, exact=True) + 105 + + """ + + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if np.ndim(n) == 0 and not isinstance(n, np.ndarray): + # scalar cases + if n is None or np.isnan(n): + return np.nan + elif not np.issubdtype(type(n), np.integer): + msg = "factorial2 does not support non-integral scalar arguments" + raise ValueError(msg) + elif n < 0: + return 0 + elif n in {0, 1}: + return 1 + # general integer case + if exact: + return _range_prod(1, n, k=2) + return _factorialx_approx_core(n, k=2) + # arrays & array-likes + n = asarray(n) + if n.size == 0: + # return empty arrays unchanged + return n + if not np.issubdtype(n.dtype, np.integer): + raise ValueError("factorial2 does not support non-integral arrays") + if exact: + return _factorialx_array_exact(n, k=2) + return _factorialx_array_approx(n, k=2) + + +def factorialk(n, k, exact=None): + """Multifactorial of n of order k, n(!!...!). + + This is the multifactorial of n skipping k values. For example, + + factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1 + + In particular, for any integer ``n``, we have + + factorialk(n, 1) = factorial(n) + + factorialk(n, 2) = factorial2(n) + + Parameters + ---------- + n : int or array_like + Calculate multifactorial. If ``n < 0``, the return value is 0. + k : int + Order of multifactorial. + exact : bool, optional + If exact is set to True, calculate the answer exactly using + integer arithmetic, otherwise use an approximation (faster, + but yields floats instead of integers) + + .. warning:: + The default value for ``exact`` will be changed to + ``False`` in SciPy 1.15.0. + + Returns + ------- + val : int + Multifactorial of `n`. 
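+
+    Raises
+    ------
+    ValueError
+        If `k` is not a positive integer, or if `n` contains
+        non-integral values.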
+ + Examples + -------- + >>> from scipy.special import factorialk + >>> factorialk(5, k=1, exact=True) + 120 + >>> factorialk(5, k=3, exact=True) + 10 + >>> factorialk([5, 7, 9], k=3, exact=True) + array([ 10, 28, 162]) + >>> factorialk([5, 7, 9], k=3, exact=False) + array([ 10., 28., 162.]) + + Notes + ----- + While less straight-forward than for the double-factorial, it's possible to + calculate a general approximation formula of n!(k) by studying ``n`` for a given + remainder ``r < k`` (thus ``n = m * k + r``, resp. ``r = n % k``), which can be + put together into something valid for all integer values ``n >= 0`` & ``k > 0``:: + + n!(k) = k ** ((n - r)/k) * gamma(n/k + 1) / gamma(r/k + 1) * max(r, 1) + + This is the basis of the approximation when ``exact=False``. Compare also [1]. + + References + ---------- + .. [1] Complex extension to multifactorial + https://en.wikipedia.org/wiki/Double_factorial#Alternative_extension_of_the_multifactorial + """ + if not np.issubdtype(type(k), np.integer) or k < 1: + raise ValueError(f"k must be a positive integer, received: {k}") + if exact is None: + msg = ( + "factorialk will default to `exact=False` starting from SciPy " + "1.15.0. To avoid behaviour changes due to this, explicitly " + "specify either `exact=False` (faster, returns floats), or the " + "past default `exact=True` (slower, lossless result as integer)." + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + exact = True + + helpmsg = "" + if k in {1, 2}: + func = "factorial" if k == 1 else "factorial2" + helpmsg = f"\nYou can try to use {func} instead" + + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if np.ndim(n) == 0 and not isinstance(n, np.ndarray): + # scalar cases + if n is None or np.isnan(n): + return np.nan + elif not np.issubdtype(type(n), np.integer): + msg = "factorialk does not support non-integral scalar arguments!" + raise ValueError(msg + helpmsg) + elif n < 0: + return 0 + elif n in {0, 1}: + return 1 + # general integer case + if exact: + return _range_prod(1, n, k=k) + return _factorialx_approx_core(n, k=k) + # arrays & array-likes + n = asarray(n) + if n.size == 0: + # return empty arrays unchanged + return n + if not np.issubdtype(n.dtype, np.integer): + msg = "factorialk does not support non-integral arrays!" + raise ValueError(msg + helpmsg) + if exact: + return _factorialx_array_exact(n, k=k) + return _factorialx_array_approx(n, k=k) + + +def stirling2(N, K, *, exact=False): + r"""Generate Stirling number(s) of the second kind. + + Stirling numbers of the second kind count the number of ways to + partition a set with N elements into K non-empty subsets. + + The values this function returns are calculated using a dynamic + program which avoids redundant computation across the subproblems + in the solution. For array-like input, this implementation also + avoids redundant computation across the different Stirling number + calculations. + + The numbers are sometimes denoted + + .. math:: + + {N \brace{K}} + + see [1]_ for details. This is often expressed-verbally-as + "N subset K". + + Parameters + ---------- + N : int, ndarray + Number of things. + K : int, ndarray + Number of non-empty subsets taken. + exact : bool, optional + Uses dynamic programming (DP) with floating point + numbers for smaller arrays and uses a second order approximation due to + Temme for larger entries of `N` and `K` that allows trading speed for + accuracy. See [2]_ for a description. Temme approximation is used for + values `n>50`. 
The max error from the DP has max relative error + `4.5*10^-16` for `n<=50` and the max error from the Temme approximation + has max relative error `5*10^-5` for `51 <= n < 70` and + `9*10^-6` for `70 <= n < 101`. Note that these max relative errors will + decrease further as `n` increases. + + Returns + ------- + val : int, float, ndarray + The number of partitions. + + See Also + -------- + comb : The number of combinations of N things taken k at a time. + + Notes + ----- + - If N < 0, or K < 0, then 0 is returned. + - If K > N, then 0 is returned. + + The output type will always be `int` or ndarray of `object`. + The input must contain either numpy or python integers otherwise a + TypeError is raised. + + References + ---------- + .. [1] R. L. Graham, D. E. Knuth and O. Patashnik, "Concrete + Mathematics: A Foundation for Computer Science," Addison-Wesley + Publishing Company, Boston, 1989. Chapter 6, page 258. + + .. [2] Temme, Nico M. "Asymptotic estimates of Stirling numbers." + Studies in Applied Mathematics 89.3 (1993): 233-243. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import stirling2 + >>> k = np.array([3, -1, 3]) + >>> n = np.array([10, 10, 9]) + >>> stirling2(n, k) + array([9330, 0, 3025], dtype=object) + + """ + output_is_scalar = np.isscalar(N) and np.isscalar(K) + # make a min-heap of unique (n,k) pairs + N, K = asarray(N), asarray(K) + if not np.issubdtype(N.dtype, np.integer): + raise TypeError("Argument `N` must contain only integers") + if not np.issubdtype(K.dtype, np.integer): + raise TypeError("Argument `K` must contain only integers") + if not exact: + # NOTE: here we allow np.uint via casting to double types prior to + # passing to private ufunc dispatcher. All dispatched functions + # take double type for (n,k) arguments and return double. + return _stirling2_inexact(N.astype(float), K.astype(float)) + nk_pairs = list( + set([(n.take(0), k.take(0)) + for n, k in np.nditer([N, K], ['refs_ok'])]) + ) + heapify(nk_pairs) + # base mapping for small values + snsk_vals = defaultdict(int) + for pair in [(0, 0), (1, 1), (2, 1), (2, 2)]: + snsk_vals[pair] = 1 + # for each pair in the min-heap, calculate the value, store for later + n_old, n_row = 2, [0, 1, 1] + while nk_pairs: + n, k = heappop(nk_pairs) + if n < 2 or k > n or k <= 0: + continue + elif k == n or k == 1: + snsk_vals[(n, k)] = 1 + continue + elif n != n_old: + num_iters = n - n_old + while num_iters > 0: + n_row.append(1) + # traverse from back to remove second row + for j in range(len(n_row)-2, 1, -1): + n_row[j] = n_row[j]*j + n_row[j-1] + num_iters -= 1 + snsk_vals[(n, k)] = n_row[k] + else: + snsk_vals[(n, k)] = n_row[k] + n_old, n_row = n, n_row + out_types = [object, object, object] if exact else [float, float, float] + # for each pair in the map, fetch the value, and populate the array + it = np.nditer( + [N, K, None], + ['buffered', 'refs_ok'], + [['readonly'], ['readonly'], ['writeonly', 'allocate']], + op_dtypes=out_types, + ) + with it: + while not it.finished: + it[2] = snsk_vals[(int(it[0]), int(it[1]))] + it.iternext() + output = it.operands[2] + # If N and K were both scalars, convert output to scalar. + if output_is_scalar: + output = output.take(0) + return output + + +def zeta(x, q=None, out=None): + r""" + Riemann or Hurwitz zeta function. + + Parameters + ---------- + x : array_like of float + Input data, must be real + q : array_like of float, optional + Input data, must be real. Defaults to Riemann zeta. 
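+        The default is equivalent to ``q = 1``; see Notes.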
+ out : ndarray, optional + Output array for the computed values. + + Returns + ------- + out : array_like + Values of zeta(x). + + See Also + -------- + zetac + + Notes + ----- + The two-argument version is the Hurwitz zeta function + + .. math:: + + \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}; + + see [dlmf]_ for details. The Riemann zeta function corresponds to + the case when ``q = 1``. + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/25.11#i + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import zeta, polygamma, factorial + + Some specific values: + + >>> zeta(2), np.pi**2/6 + (1.6449340668482266, 1.6449340668482264) + + >>> zeta(4), np.pi**4/90 + (1.0823232337111381, 1.082323233711138) + + Relation to the `polygamma` function: + + >>> m = 3 + >>> x = 1.25 + >>> polygamma(m, x) + array(2.782144009188397) + >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x) + 2.7821440091883969 + + """ + if q is None: + return _ufuncs._riemann_zeta(x, out) + else: + return _ufuncs._zeta(x, q, out) + + +def _sph_harm_all(m, n, theta, phi): + """Private function. This may be removed or modified at any time.""" + + theta = np.asarray(theta) + if (not np.issubdtype(theta.dtype, np.inexact)): + theta = theta.astype(np.float64) + + phi = np.asarray(phi) + if (not np.issubdtype(phi.dtype, np.inexact)): + phi = phi.astype(np.float64) + + out = np.empty((2 * m + 1, n + 1) + np.broadcast_shapes(theta.shape, phi.shape), + dtype = np.result_type(1j, theta.dtype, phi.dtype)) + _sph_harm_all_gufunc(theta, phi, out = np.moveaxis(out, (0, 1), (-2, -1))) + + return out diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_comb.cpython-310-x86_64-linux-gnu.so b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_comb.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..13b43dfb31a9c86e2349baf128daf4fa490b2b9d Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_comb.cpython-310-x86_64-linux-gnu.so differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm.py new file mode 100644 index 0000000000000000000000000000000000000000..1b1ce34aa58054be13edfd5d87f2059e8a0d9224 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm.py @@ -0,0 +1,214 @@ +import numpy as np + +from ._ufuncs import _ellip_harm +from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm + + +def ellip_harm(h2, k2, n, p, s, signm=1, signn=1): + r""" + Ellipsoidal harmonic functions E^p_n(l) + + These are also known as Lame functions of the first kind, and are + solutions to the Lame equation: + + .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0 + + where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not + returned) corresponding to the solutions. + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree + s : float + Coordinate + p : int + Order, can range between [1,2n+1] + signm : {1, -1}, optional + Sign of prefactor of functions. Can be +/-1. See Notes. 
+ signn : {1, -1}, optional + Sign of prefactor of functions. Can be +/-1. See Notes. + + Returns + ------- + E : float + the harmonic :math:`E^p_n(s)` + + See Also + -------- + ellip_harm_2, ellip_normal + + Notes + ----- + The geometric interpretation of the ellipsoidal functions is + explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments control the + sign of prefactors for functions according to their type:: + + K : +1 + L : signm + M : signn + N : signm*signn + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Digital Library of Mathematical Functions 29.12 + https://dlmf.nist.gov/29.12 + .. [2] Bardhan and Knepley, "Computational science and + re-discovery: open-source implementations of + ellipsoidal harmonics for problems in potential theory", + Comput. Sci. Disc. 5, 014006 (2012) + :doi:`10.1088/1749-4699/5/1/014006`. + .. [3] David J.and Dechambre P, "Computation of Ellipsoidal + Gravity Field Harmonics for small solar system bodies" + pp. 30-36, 2000 + .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications" + pp. 418, 2012 + + Examples + -------- + >>> from scipy.special import ellip_harm + >>> w = ellip_harm(5,8,1,1,2.5) + >>> w + 2.5 + + Check that the functions indeed are solutions to the Lame equation: + + >>> import numpy as np + >>> from scipy.interpolate import UnivariateSpline + >>> def eigenvalue(f, df, ddf): + ... r = (((s**2 - h**2) * (s**2 - k**2) * ddf + ... + s * (2*s**2 - h**2 - k**2) * df + ... - n * (n + 1)*s**2*f) / f) + ... return -r.mean(), r.std() + >>> s = np.linspace(0.1, 10, 200) + >>> k, h, n, p = 8.0, 2.2, 3, 2 + >>> E = ellip_harm(h**2, k**2, n, p, s) + >>> E_spl = UnivariateSpline(s, E) + >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2)) + >>> a, a_err + (583.44366156701483, 6.4580890640310646e-11) + + """ # noqa: E501 + return _ellip_harm(h2, k2, n, p, s, signm, signn) + + +_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d') + + +def ellip_harm_2(h2, k2, n, p, s): + r""" + Ellipsoidal harmonic functions F^p_n(l) + + These are also known as Lame functions of the second kind, and are + solutions to the Lame equation: + + .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0 + + where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not + returned) corresponding to the solutions. + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree. + p : int + Order, can range between [1,2n+1]. + s : float + Coordinate + + Returns + ------- + F : float + The harmonic :math:`F^p_n(s)` + + See Also + -------- + ellip_harm, ellip_normal + + Notes + ----- + Lame functions of the second kind are related to the functions of the first kind: + + .. math:: + + F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s} + \frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}} + + .. versionadded:: 0.15.0 + + Examples + -------- + >>> from scipy.special import ellip_harm_2 + >>> w = ellip_harm_2(5,8,2,1,10) + >>> w + 0.00108056853382 + + """ + with np.errstate(all='ignore'): + return _ellip_harm_2_vec(h2, k2, n, p, s) + + +def _ellip_normal_vec(h2, k2, n, p): + return _ellipsoid_norm(h2, k2, n, p) + + +_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d') + + +def ellip_normal(h2, k2, n, p): + r""" + Ellipsoidal harmonic normalization constants gamma^p_n + + The normalization constant is defined as + + .. 
math:: + + \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy + \frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt((k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)} + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree. + p : int + Order, can range between [1,2n+1]. + + Returns + ------- + gamma : float + The normalization constant :math:`\gamma^p_n` + + See Also + -------- + ellip_harm, ellip_harm_2 + + Notes + ----- + .. versionadded:: 0.15.0 + + Examples + -------- + >>> from scipy.special import ellip_normal + >>> w = ellip_normal(5,8,3,7) + >>> w + 1723.38796997 + + """ + with np.errstate(all='ignore'): + return _ellip_normal_vec(h2, k2, n, p) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_lambertw.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_lambertw.py new file mode 100644 index 0000000000000000000000000000000000000000..f758c7c21fdddc0ec1b84727d90c6de7f34a094e --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_lambertw.py @@ -0,0 +1,149 @@ +from ._ufuncs import _lambertw + +import numpy as np + + +def lambertw(z, k=0, tol=1e-8): + r""" + lambertw(z, k=0, tol=1e-8) + + Lambert W function. + + The Lambert W function `W(z)` is defined as the inverse function + of ``w * exp(w)``. In other words, the value of ``W(z)`` is + such that ``z = W(z) * exp(W(z))`` for any complex number + ``z``. + + The Lambert W function is a multivalued function with infinitely + many branches. Each branch gives a separate solution of the + equation ``z = w exp(w)``. Here, the branches are indexed by the + integer `k`. + + Parameters + ---------- + z : array_like + Input argument. + k : int, optional + Branch index. + tol : float, optional + Evaluation tolerance. + + Returns + ------- + w : array + `w` will have the same shape as `z`. + + See Also + -------- + wrightomega : the Wright Omega function + + Notes + ----- + All branches are supported by `lambertw`: + + * ``lambertw(z)`` gives the principal solution (branch 0) + * ``lambertw(z, k)`` gives the solution on branch `k` + + The Lambert W function has two partially real branches: the + principal branch (`k = 0`) is real for real ``z > -1/e``, and the + ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except + ``k = 0`` have a logarithmic singularity at ``z = 0``. + + **Possible issues** + + The evaluation can become inaccurate very close to the branch point + at ``-1/e``. In some corner cases, `lambertw` might currently + fail to converge, or can end up on the wrong branch. + + **Algorithm** + + Halley's iteration is used to invert ``w * exp(w)``, using a first-order + asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate. + + The definition, implementation and choice of branches is based on [2]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Lambert_W_function + .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5 + (1996) 329-359. 
+ https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf + + Examples + -------- + The Lambert W function is the inverse of ``w exp(w)``: + + >>> import numpy as np + >>> from scipy.special import lambertw + >>> w = lambertw(1) + >>> w + (0.56714329040978384+0j) + >>> w * np.exp(w) + (1.0+0j) + + Any branch gives a valid inverse: + + >>> w = lambertw(1, k=3) + >>> w + (-2.8535817554090377+17.113535539412148j) + >>> w*np.exp(w) + (1.0000000000000002+1.609823385706477e-15j) + + **Applications to equation-solving** + + The Lambert W function may be used to solve various kinds of + equations. We give two examples here. + + First, the function can be used to solve implicit equations of the + form + + :math:`x = a + b e^{c x}` + + for :math:`x`. We assume :math:`c` is not zero. After a little + algebra, the equation may be written + + :math:`z e^z = -b c e^{a c}` + + where :math:`z = c (a - x)`. :math:`z` may then be expressed using + the Lambert W function + + :math:`z = W(-b c e^{a c})` + + giving + + :math:`x = a - W(-b c e^{a c})/c` + + For example, + + >>> a = 3 + >>> b = 2 + >>> c = -0.5 + + The solution to :math:`x = a + b e^{c x}` is: + + >>> x = a - lambertw(-b*c*np.exp(a*c))/c + >>> x + (3.3707498368978794+0j) + + Verify that it solves the equation: + + >>> a + b*np.exp(c*x) + (3.37074983689788+0j) + + The Lambert W function may also be used find the value of the infinite + power tower :math:`z^{z^{z^{\ldots}}}`: + + >>> def tower(z, n): + ... if n == 0: + ... return z + ... return z ** tower(z, n-1) + ... + >>> tower(0.5, 100) + 0.641185744504986 + >>> -lambertw(-np.log(0.5)) / np.log(0.5) + (0.64118574450498589+0j) + """ + # TODO: special expert should inspect this + # interception; better place to do it? + k = np.asarray(k, dtype=np.dtype("long")) + return _lambertw(z, k, tol) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_logsumexp.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_logsumexp.py new file mode 100644 index 0000000000000000000000000000000000000000..50b59102822d64b794c7fe995df8ab64f40c145a --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_logsumexp.py @@ -0,0 +1,308 @@ +import numpy as np +from scipy._lib._util import _asarray_validated + +__all__ = ["logsumexp", "softmax", "log_softmax"] + + +def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): + """Compute the log of the sum of exponentials of input elements. + + Parameters + ---------- + a : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes over which the sum is taken. By default `axis` is None, + and all elements are summed. + + .. versionadded:: 0.11.0 + b : array-like, optional + Scaling factor for exp(`a`) must be of the same shape as `a` or + broadcastable to `a`. These values may be negative in order to + implement subtraction. + + .. versionadded:: 0.12.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original array. + + .. versionadded:: 0.15.0 + return_sign : bool, optional + If this is set to True, the result will be a pair containing sign + information; if False, results that are negative will be returned + as NaN. Default is False (no sign information). + + .. 
versionadded:: 0.16.0 + + Returns + ------- + res : ndarray + The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically + more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))`` + is returned. If ``return_sign`` is True, ``res`` contains the log of + the absolute value of the argument. + sgn : ndarray + If ``return_sign`` is True, this will be an array of floating-point + numbers matching res containing +1, 0, -1 (for real-valued inputs) + or a complex phase (for complex inputs). This gives the sign of the + argument of the logarithm in ``res``. + If ``return_sign`` is False, only one result is returned. + + See Also + -------- + numpy.logaddexp, numpy.logaddexp2 + + Notes + ----- + NumPy has a logaddexp function which is very similar to `logsumexp`, but + only handles two arguments. `logaddexp.reduce` is similar to this + function, but may be less stable. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import logsumexp + >>> a = np.arange(10) + >>> logsumexp(a) + 9.4586297444267107 + >>> np.log(np.sum(np.exp(a))) + 9.4586297444267107 + + With weights + + >>> a = np.arange(10) + >>> b = np.arange(10, 0, -1) + >>> logsumexp(a, b=b) + 9.9170178533034665 + >>> np.log(np.sum(b*np.exp(a))) + 9.9170178533034647 + + Returning a sign flag + + >>> logsumexp([1,2],b=[1,-1],return_sign=True) + (1.5413248546129181, -1.0) + + Notice that `logsumexp` does not directly support masked arrays. To use it + on a masked array, convert the mask into zero weights: + + >>> a = np.ma.array([np.log(2), 2, np.log(3)], + ... mask=[False, True, False]) + >>> b = (~a.mask).astype(int) + >>> logsumexp(a.data, b=b), np.log(5) + 1.6094379124341005, 1.6094379124341005 + + """ + a = _asarray_validated(a, check_finite=False) + if b is not None: + a, b = np.broadcast_arrays(a, b) + if np.any(b == 0): + a = a + 0. # promote to at least float + a[b == 0] = -np.inf + + # Scale by real part for complex inputs, because this affects + # the magnitude of the exponential. + initial_value = -np.inf if np.size(a) == 0 else None + a_max = np.amax(a.real, axis=axis, keepdims=True, initial=initial_value) + + if a_max.ndim > 0: + a_max[~np.isfinite(a_max)] = 0 + elif not np.isfinite(a_max): + a_max = 0 + + if b is not None: + b = np.asarray(b) + tmp = b * np.exp(a - a_max) + else: + tmp = np.exp(a - a_max) + + # suppress warnings about log of zero + with np.errstate(divide='ignore'): + s = np.sum(tmp, axis=axis, keepdims=keepdims) + if return_sign: + # For complex, use the numpy>=2.0 convention for sign. + if np.issubdtype(s.dtype, np.complexfloating): + sgn = s / np.where(s == 0, 1, abs(s)) + else: + sgn = np.sign(s) + s = abs(s) + out = np.log(s) + + if not keepdims: + a_max = np.squeeze(a_max, axis=axis) + out += a_max + + if return_sign: + return out, sgn + else: + return out + + +def softmax(x, axis=None): + r"""Compute the softmax function. + + The softmax function transforms each element of a collection by + computing the exponential of each element divided by the sum of the + exponentials of all the elements. That is, if `x` is a one-dimensional + numpy array:: + + softmax(x) = np.exp(x)/sum(np.exp(x)) + + Parameters + ---------- + x : array_like + Input array. + axis : int or tuple of ints, optional + Axis to compute values along. Default is None and softmax will be + computed over the entire array `x`. + + Returns + ------- + s : ndarray + An array the same shape as `x`. The result will sum to 1 along the + specified axis. 
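+
+    See Also
+    --------
+    log_softmax, logsumexp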
+ + Notes + ----- + The formula for the softmax function :math:`\sigma(x)` for a vector + :math:`x = \{x_0, x_1, ..., x_{n-1}\}` is + + .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}} + + The `softmax` function is the gradient of `logsumexp`. + + The implementation uses shifting to avoid overflow. See [1]_ for more + details. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] P. Blanchard, D.J. Higham, N.J. Higham, "Accurately computing the + log-sum-exp and softmax functions", IMA Journal of Numerical Analysis, + Vol.41(4), :doi:`10.1093/imanum/draa038`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import softmax + >>> np.set_printoptions(precision=5) + + >>> x = np.array([[1, 0.5, 0.2, 3], + ... [1, -1, 7, 3], + ... [2, 12, 13, 3]]) + ... + + Compute the softmax transformation over the entire array. + + >>> m = softmax(x) + >>> m + array([[ 4.48309e-06, 2.71913e-06, 2.01438e-06, 3.31258e-05], + [ 4.48309e-06, 6.06720e-07, 1.80861e-03, 3.31258e-05], + [ 1.21863e-05, 2.68421e-01, 7.29644e-01, 3.31258e-05]]) + + >>> m.sum() + 1.0 + + Compute the softmax transformation along the first axis (i.e., the + columns). + + >>> m = softmax(x, axis=0) + + >>> m + array([[ 2.11942e-01, 1.01300e-05, 2.75394e-06, 3.33333e-01], + [ 2.11942e-01, 2.26030e-06, 2.47262e-03, 3.33333e-01], + [ 5.76117e-01, 9.99988e-01, 9.97525e-01, 3.33333e-01]]) + + >>> m.sum(axis=0) + array([ 1., 1., 1., 1.]) + + Compute the softmax transformation along the second axis (i.e., the rows). + + >>> m = softmax(x, axis=1) + >>> m + array([[ 1.05877e-01, 6.42177e-02, 4.75736e-02, 7.82332e-01], + [ 2.42746e-03, 3.28521e-04, 9.79307e-01, 1.79366e-02], + [ 1.22094e-05, 2.68929e-01, 7.31025e-01, 3.31885e-05]]) + + >>> m.sum(axis=1) + array([ 1., 1., 1.]) + + """ + x = _asarray_validated(x, check_finite=False) + x_max = np.amax(x, axis=axis, keepdims=True) + exp_x_shifted = np.exp(x - x_max) + return exp_x_shifted / np.sum(exp_x_shifted, axis=axis, keepdims=True) + + +def log_softmax(x, axis=None): + r"""Compute the logarithm of the softmax function. + + In principle:: + + log_softmax(x) = log(softmax(x)) + + but using a more accurate implementation. + + Parameters + ---------- + x : array_like + Input array. + axis : int or tuple of ints, optional + Axis to compute values along. Default is None and softmax will be + computed over the entire array `x`. + + Returns + ------- + s : ndarray or scalar + An array with the same shape as `x`. Exponential of the result will + sum to 1 along the specified axis. If `x` is a scalar, a scalar is + returned. + + Notes + ----- + `log_softmax` is more accurate than ``np.log(softmax(x))`` with inputs that + make `softmax` saturate (see examples below). + + .. versionadded:: 1.5.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import log_softmax + >>> from scipy.special import softmax + >>> np.set_printoptions(precision=5) + + >>> x = np.array([1000.0, 1.0]) + + >>> y = log_softmax(x) + >>> y + array([ 0., -999.]) + + >>> with np.errstate(divide='ignore'): + ... y = np.log(softmax(x)) + ... 
+ >>> y + array([ 0., -inf]) + + """ + + x = _asarray_validated(x, check_finite=False) + + x_max = np.amax(x, axis=axis, keepdims=True) + + if x_max.ndim > 0: + x_max[~np.isfinite(x_max)] = 0 + elif not np.isfinite(x_max): + x_max = 0 + + tmp = x - x_max + exp_tmp = np.exp(tmp) + + # suppress warnings about log of zero + with np.errstate(divide='ignore'): + s = np.sum(exp_tmp, axis=axis, keepdims=True) + out = np.log(s) + + out = tmp - out + return out diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_mptestutils.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_mptestutils.py new file mode 100644 index 0000000000000000000000000000000000000000..f7b88f6b244bc5ff95af04a241f1959030df2568 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_mptestutils.py @@ -0,0 +1,453 @@ +import os +import sys +import time +from itertools import zip_longest + +import numpy as np +from numpy.testing import assert_ +import pytest + +from scipy.special._testutils import assert_func_equal + +try: + import mpmath +except ImportError: + pass + + +# ------------------------------------------------------------------------------ +# Machinery for systematic tests with mpmath +# ------------------------------------------------------------------------------ + +class Arg: + """Generate a set of numbers on the real axis, concentrating on + 'interesting' regions and covering all orders of magnitude. + + """ + + def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True): + if a > b: + raise ValueError("a should be less than or equal to b") + if a == -np.inf: + a = -0.5*np.finfo(float).max + if b == np.inf: + b = 0.5*np.finfo(float).max + self.a, self.b = a, b + + self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b + + def _positive_values(self, a, b, n): + if a < 0: + raise ValueError("a should be positive") + + # Try to put half of the points into a linspace between a and + # 10 the other half in a logspace. + if n % 2 == 0: + nlogpts = n//2 + nlinpts = nlogpts + else: + nlogpts = n//2 + nlinpts = nlogpts + 1 + + if a >= 10: + # Outside of linspace range; just return a logspace. + pts = np.logspace(np.log10(a), np.log10(b), n) + elif a > 0 and b < 10: + # Outside of logspace range; just return a linspace + pts = np.linspace(a, b, n) + elif a > 0: + # Linspace between a and 10 and a logspace between 10 and + # b. + linpts = np.linspace(a, 10, nlinpts, endpoint=False) + logpts = np.logspace(1, np.log10(b), nlogpts) + pts = np.hstack((linpts, logpts)) + elif a == 0 and b <= 10: + # Linspace between 0 and b and a logspace between 0 and + # the smallest positive point of the linspace + linpts = np.linspace(0, b, nlinpts) + if linpts.size > 1: + right = np.log10(linpts[1]) + else: + right = -30 + logpts = np.logspace(-30, right, nlogpts, endpoint=False) + pts = np.hstack((logpts, linpts)) + else: + # Linspace between 0 and 10, logspace between 0 and the + # smallest positive point of the linspace, and a logspace + # between 10 and b. 
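+            # Split the log points between the two log regions; when
+            # nlogpts is odd, the upper region (between 10 and b) gets
+            # the extra point.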
+ if nlogpts % 2 == 0: + nlogpts1 = nlogpts//2 + nlogpts2 = nlogpts1 + else: + nlogpts1 = nlogpts//2 + nlogpts2 = nlogpts1 + 1 + linpts = np.linspace(0, 10, nlinpts, endpoint=False) + if linpts.size > 1: + right = np.log10(linpts[1]) + else: + right = -30 + logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False) + logpts2 = np.logspace(1, np.log10(b), nlogpts2) + pts = np.hstack((logpts1, linpts, logpts2)) + + return np.sort(pts) + + def values(self, n): + """Return an array containing n numbers.""" + a, b = self.a, self.b + if a == b: + return np.zeros(n) + + if not self.inclusive_a: + n += 1 + if not self.inclusive_b: + n += 1 + + if n % 2 == 0: + n1 = n//2 + n2 = n1 + else: + n1 = n//2 + n2 = n1 + 1 + + if a >= 0: + pospts = self._positive_values(a, b, n) + negpts = [] + elif b <= 0: + pospts = [] + negpts = -self._positive_values(-b, -a, n) + else: + pospts = self._positive_values(0, b, n1) + negpts = -self._positive_values(0, -a, n2 + 1) + # Don't want to get zero twice + negpts = negpts[1:] + pts = np.hstack((negpts[::-1], pospts)) + + if not self.inclusive_a: + pts = pts[1:] + if not self.inclusive_b: + pts = pts[:-1] + return pts + + +class FixedArg: + def __init__(self, values): + self._values = np.asarray(values) + + def values(self, n): + return self._values + + +class ComplexArg: + def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)): + self.real = Arg(a.real, b.real) + self.imag = Arg(a.imag, b.imag) + + def values(self, n): + m = int(np.floor(np.sqrt(n))) + x = self.real.values(m) + y = self.imag.values(m + 1) + return (x[:,None] + 1j*y[None,:]).ravel() + + +class IntArg: + def __init__(self, a=-1000, b=1000): + self.a = a + self.b = b + + def values(self, n): + v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int) + v2 = np.arange(-5, 5) + v = np.unique(np.r_[v1, v2]) + v = v[(v >= self.a) & (v < self.b)] + return v + + +def get_args(argspec, n): + if isinstance(argspec, np.ndarray): + args = argspec.copy() + else: + nargs = len(argspec) + ms = np.asarray( + [1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec] + ) + ms = (n**(ms/sum(ms))).astype(int) + 1 + + args = [spec.values(m) for spec, m in zip(argspec, ms)] + args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T + + return args + + +class MpmathData: + def __init__(self, scipy_func, mpmath_func, arg_spec, name=None, + dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300, + ignore_inf_sign=False, distinguish_nan_and_inf=True, + nan_ok=True, param_filter=None): + + # mpmath tests are really slow (see gh-6989). 
Use a small number of
+        # points by default, increase back to 5000 (old default) if XSLOW is
+        # set
+        if n is None:
+            try:
+                is_xslow = int(os.environ.get('SCIPY_XSLOW', '0'))
+            except ValueError:
+                is_xslow = False
+
+            n = 5000 if is_xslow else 500
+
+        self.scipy_func = scipy_func
+        self.mpmath_func = mpmath_func
+        self.arg_spec = arg_spec
+        self.dps = dps
+        self.prec = prec
+        self.n = n
+        self.rtol = rtol
+        self.atol = atol
+        self.ignore_inf_sign = ignore_inf_sign
+        self.nan_ok = nan_ok
+        if isinstance(self.arg_spec, np.ndarray):
+            self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
+        else:
+            self.is_complex = any(
+                [isinstance(arg, ComplexArg) for arg in self.arg_spec]
+            )
+        self.ignore_inf_sign = ignore_inf_sign
+        self.distinguish_nan_and_inf = distinguish_nan_and_inf
+        if not name or name == '<lambda>':
+            name = getattr(scipy_func, '__name__', None)
+        if not name or name == '<lambda>':
+            name = getattr(mpmath_func, '__name__', None)
+        self.name = name
+        self.param_filter = param_filter
+
+    def check(self):
+        np.random.seed(1234)
+
+        # Generate values for the arguments
+        argarr = get_args(self.arg_spec, self.n)
+
+        # Check
+        old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
+        try:
+            if self.dps is not None:
+                dps_list = [self.dps]
+            else:
+                dps_list = [20]
+            if self.prec is not None:
+                mpmath.mp.prec = self.prec
+
+            # Proper casting of mpmath input and output types. Using
+            # native mpmath types as inputs gives improved precision
+            # in some cases.
+            if np.issubdtype(argarr.dtype, np.complexfloating):
+                pytype = mpc2complex
+
+                def mptype(x):
+                    return mpmath.mpc(complex(x))
+            else:
+                def mptype(x):
+                    return mpmath.mpf(float(x))
+
+                def pytype(x):
+                    if abs(x.imag) > 1e-16*(1 + abs(x.real)):
+                        return np.nan
+                    else:
+                        return mpf2float(x.real)
+
+            # Try out different dps until one (or none) works
+            for j, dps in enumerate(dps_list):
+                mpmath.mp.dps = dps
+
+                try:
+                    assert_func_equal(
+                        self.scipy_func,
+                        lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
+                        argarr,
+                        vectorized=False,
+                        rtol=self.rtol,
+                        atol=self.atol,
+                        ignore_inf_sign=self.ignore_inf_sign,
+                        distinguish_nan_and_inf=self.distinguish_nan_and_inf,
+                        nan_ok=self.nan_ok,
+                        param_filter=self.param_filter
+                    )
+                    break
+                except AssertionError:
+                    if j >= len(dps_list)-1:
+                        # reraise the Exception
+                        tp, value, tb = sys.exc_info()
+                        if value.__traceback__ is not tb:
+                            raise value.with_traceback(tb)
+                        raise value
+        finally:
+            mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
+
+    def __repr__(self):
+        if self.is_complex:
+            return f"<MpmathData: {self.name} (complex)>"
+        else:
+            return f"<MpmathData: {self.name}>"
+
+
+def assert_mpmath_equal(*a, **kw):
+    d = MpmathData(*a, **kw)
+    d.check()
+
+
+def nonfunctional_tooslow(func):
+    return pytest.mark.skip(
+        reason="    Test not yet functional (too slow), needs more work."
+    )(func)
+
+
+# ------------------------------------------------------------------------------
+# Tools for dealing with mpmath quirks
+# ------------------------------------------------------------------------------
+
+def mpf2float(x):
+    """
+    Convert an mpf to the nearest floating point number. Just using
Just using + float directly doesn't work because of results like this: + + with mp.workdps(50): + float(mpf("0.99999999999999999")) = 0.9999999999999999 + + """ + return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)) + + +def mpc2complex(x): + return complex(mpf2float(x.real), mpf2float(x.imag)) + + +def trace_args(func): + def tofloat(x): + if isinstance(x, mpmath.mpc): + return complex(x) + else: + return float(x) + + def wrap(*a, **kw): + sys.stderr.write(f"{tuple(map(tofloat, a))!r}: ") + sys.stderr.flush() + try: + r = func(*a, **kw) + sys.stderr.write("-> %r" % r) + finally: + sys.stderr.write("\n") + sys.stderr.flush() + return r + return wrap + + +try: + import signal + POSIX = ('setitimer' in dir(signal)) +except ImportError: + POSIX = False + + +class TimeoutError(Exception): + pass + + +def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True): + """ + Decorator for setting a timeout for pure-Python functions. + + If the function does not return within `timeout` seconds, the + value `return_val` is returned instead. + + On POSIX this uses SIGALRM by default. On non-POSIX, settrace is + used. Do not use this with threads: the SIGALRM implementation + does probably not work well. The settrace implementation only + traces the current thread. + + The settrace implementation slows down execution speed. Slowdown + by a factor around 10 is probably typical. + """ + if POSIX and use_sigalrm: + def sigalrm_handler(signum, frame): + raise TimeoutError() + + def deco(func): + def wrap(*a, **kw): + old_handler = signal.signal(signal.SIGALRM, sigalrm_handler) + signal.setitimer(signal.ITIMER_REAL, timeout) + try: + return func(*a, **kw) + except TimeoutError: + return return_val + finally: + signal.setitimer(signal.ITIMER_REAL, 0) + signal.signal(signal.SIGALRM, old_handler) + return wrap + else: + def deco(func): + def wrap(*a, **kw): + start_time = time.time() + + def trace(frame, event, arg): + if time.time() - start_time > timeout: + raise TimeoutError() + return trace + sys.settrace(trace) + try: + return func(*a, **kw) + except TimeoutError: + sys.settrace(None) + return return_val + finally: + sys.settrace(None) + return wrap + return deco + + +def exception_to_nan(func): + """Decorate function to return nan if it raises an exception""" + def wrap(*a, **kw): + try: + return func(*a, **kw) + except Exception: + return np.nan + return wrap + + +def inf_to_nan(func): + """Decorate function to return nan if it returns inf""" + def wrap(*a, **kw): + v = func(*a, **kw) + if not np.isfinite(v): + return np.nan + return v + return wrap + + +def mp_assert_allclose(res, std, atol=0, rtol=1e-17): + """ + Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it + can be done to higher precision than double. 
+ """ + failures = [] + for k, (resval, stdval) in enumerate(zip_longest(res, std)): + if resval is None or stdval is None: + raise ValueError('Lengths of inputs res and std are not equal.') + if mpmath.fabs(resval - stdval) > atol + rtol*mpmath.fabs(stdval): + failures.append((k, resval, stdval)) + + nfail = len(failures) + if nfail > 0: + ndigits = int(abs(np.log10(rtol))) + msg = [""] + msg.append(f"Bad results ({nfail} out of {k + 1}) for the following points:") + for k, resval, stdval in failures: + resrep = mpmath.nstr(resval, ndigits, min_fixed=0, max_fixed=0) + stdrep = mpmath.nstr(stdval, ndigits, min_fixed=0, max_fixed=0) + if stdval == 0: + rdiff = "inf" + else: + rdiff = mpmath.fabs((resval - stdval)/stdval) + rdiff = mpmath.nstr(rdiff, 3) + msg.append(f"{k}: {resrep} != {stdrep} (rdiff {rdiff})") + assert_(False, "\n".join(msg)) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_sf_error.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_sf_error.py new file mode 100644 index 0000000000000000000000000000000000000000..e1edc9800759dfda9e49bde1becc775a64bce958 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_sf_error.py @@ -0,0 +1,15 @@ +"""Warnings and Exceptions that can be raised by special functions.""" +import warnings + + +class SpecialFunctionWarning(Warning): + """Warning that can be emitted by special functions.""" + pass + + +warnings.simplefilter("always", category=SpecialFunctionWarning) + + +class SpecialFunctionError(Exception): + """Exception that can be raised by special functions.""" + pass diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_spfun_stats.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_spfun_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..2525eceb47ec2b20b45ca693e19e741f4a666597 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_spfun_stats.py @@ -0,0 +1,106 @@ +# Last Change: Sat Mar 21 02:00 PM 2009 J + +# Copyright (c) 2001, 2002 Enthought, Inc. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# a. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# b. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# c. Neither the name of the Enthought nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +"""Some more special functions which may be useful for multivariate statistical +analysis.""" + +import numpy as np +from scipy.special import gammaln as loggam + + +__all__ = ['multigammaln'] + + +def multigammaln(a, d): + r"""Returns the log of multivariate gamma, also sometimes called the + generalized gamma. + + Parameters + ---------- + a : ndarray + The multivariate gamma is computed for each item of `a`. + d : int + The dimension of the space of integration. + + Returns + ------- + res : ndarray + The values of the log multivariate gamma at the given points `a`. + + Notes + ----- + The formal definition of the multivariate gamma of dimension d for a real + `a` is + + .. math:: + + \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA + + with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of + all the positive definite matrices of dimension `d`. Note that `a` is a + scalar: the integrand only is multivariate, the argument is not (the + function is defined over a subset of the real set). + + This can be proven to be equal to the much friendlier equation + + .. math:: + + \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2). + + References + ---------- + R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in + probability and mathematical statistics). + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import multigammaln, gammaln + >>> a = 23.5 + >>> d = 10 + >>> multigammaln(a, d) + 454.1488605074416 + + Verify that the result agrees with the logarithm of the equation + shown above: + + >>> d*(d-1)/4*np.log(np.pi) + gammaln(a - 0.5*np.arange(0, d)).sum() + 454.1488605074416 + """ + a = np.asarray(a) + if not np.isscalar(d) or (np.floor(d) != d): + raise ValueError("d should be a positive integer (dimension)") + if np.any(a <= 0.5 * (d - 1)): + raise ValueError(f"condition a ({a:f}) > 0.5 * (d-1) ({0.5 * (d-1):f}) not met") + + res = (d * (d-1) * 0.25) * np.log(np.pi) + res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0) + return res diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_spherical_bessel.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_spherical_bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..1f4feb3fa4a2dfaea75a8e8a37ae3b87565db1bc --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_spherical_bessel.py @@ -0,0 +1,354 @@ +import numpy as np +from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in, + _spherical_kn, _spherical_jn_d, _spherical_yn_d, + _spherical_in_d, _spherical_kn_d) + +def spherical_jn(n, z, derivative=False): + r"""Spherical Bessel function of the first kind or its derivative. + + Defined as [1]_, + + .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z), + + where :math:`J_n` is the Bessel function of the first kind. 
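A quick numerical spot-check of this definitional relation, as a sketch (the order n and argument z below are arbitrary sample values, not taken from the code above):

import numpy as np
from scipy.special import jv, spherical_jn

n, z = 3, 2.7  # arbitrary sample point
lhs = spherical_jn(n, z)
rhs = np.sqrt(np.pi / (2 * z)) * jv(n + 0.5, z)  # sqrt(pi/(2z)) * J_{n+1/2}(z)
assert np.isclose(lhs, rhs)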
+ + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + jn : ndarray + + Notes + ----- + For real arguments greater than the order, the function is computed + using the ascending recurrence [2]_. For small real or complex + arguments, the definitional relation to the cylindrical Bessel function + of the first kind is used. + + The derivative is computed using the relations [3]_, + + .. math:: + j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z). + + j_0'(z) = -j_1(z) + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E3 + .. [2] https://dlmf.nist.gov/10.51.E1 + .. [3] https://dlmf.nist.gov/10.51.E2 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The spherical Bessel functions of the first kind :math:`j_n` accept + both real and complex second argument. They can return a complex type: + + >>> from scipy.special import spherical_jn + >>> spherical_jn(0, 3+5j) + (-9.878987731663194-8.021894345786002j) + >>> type(spherical_jn(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_jn(3, x, True), + ... spherical_jn(2, x) - 4/x * spherical_jn(3, x)) + True + + The first few :math:`j_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 10.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-0.5, 1.5) + >>> ax.set_title(r'Spherical Bessel functions $j_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_jn(n, x), label=rf'$j_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_jn_d(n, z) + else: + return _spherical_jn(n, z) + + +def spherical_yn(n, z, derivative=False): + r"""Spherical Bessel function of the second kind or its derivative. + + Defined as [1]_, + + .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z), + + where :math:`Y_n` is the Bessel function of the second kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + yn : ndarray + + Notes + ----- + For real arguments, the function is computed using the ascending + recurrence [2]_. For complex arguments, the definitional relation to + the cylindrical Bessel function of the second kind is used. + + The derivative is computed using the relations [3]_, + + .. math:: + y_n' = y_{n-1} - \frac{n + 1}{z} y_n. + + y_0' = -y_1 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E4 + .. [2] https://dlmf.nist.gov/10.51.E1 + .. [3] https://dlmf.nist.gov/10.51.E2 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
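The ascending recurrence cited in [2]_ above is the three-term relation f_{n-1}(z) + f_{n+1}(z) = ((2n+1)/z) f_n(z); a minimal sketch checking it for y_n (sample n and z are arbitrary):

import numpy as np
from scipy.special import spherical_yn

n, z = 2, 1.5  # arbitrary sample point
lhs = spherical_yn(n - 1, z) + spherical_yn(n + 1, z)
rhs = (2 * n + 1) / z * spherical_yn(n, z)
assert np.isclose(lhs, rhs)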
+ + Examples + -------- + The spherical Bessel functions of the second kind :math:`y_n` accept + both real and complex second argument. They can return a complex type: + + >>> from scipy.special import spherical_yn + >>> spherical_yn(0, 3+5j) + (8.022343088587197-9.880052589376795j) + >>> type(spherical_yn(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_yn(3, x, True), + ... spherical_yn(2, x) - 4/x * spherical_yn(3, x)) + True + + The first few :math:`y_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 10.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-2.0, 1.0) + >>> ax.set_title(r'Spherical Bessel functions $y_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_yn(n, x), label=rf'$y_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_yn_d(n, z) + else: + return _spherical_yn(n, z) + + +def spherical_in(n, z, derivative=False): + r"""Modified spherical Bessel function of the first kind or its derivative. + + Defined as [1]_, + + .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z), + + where :math:`I_n` is the modified Bessel function of the first kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + in : ndarray + + Notes + ----- + The function is computed using its definitional relation to the + modified cylindrical Bessel function of the first kind. + + The derivative is computed using the relations [2]_, + + .. math:: + i_n' = i_{n-1} - \frac{n + 1}{z} i_n. + + i_1' = i_0 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E7 + .. [2] https://dlmf.nist.gov/10.51.E5 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The modified spherical Bessel functions of the first kind :math:`i_n` + accept both real and complex second argument. + They can return a complex type: + + >>> from scipy.special import spherical_in + >>> spherical_in(0, 3+5j) + (-1.1689867793369182-1.2697305267234222j) + >>> type(spherical_in(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_in(3, x, True), + ... spherical_in(2, x) - 4/x * spherical_in(3, x)) + True + + The first few :math:`i_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 6.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-0.5, 5.0) + >>> ax.set_title(r'Modified spherical Bessel functions $i_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_in(n, x), label=rf'$i_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_in_d(n, z) + else: + return _spherical_in(n, z) + + +def spherical_kn(n, z, derivative=False): + r"""Modified spherical Bessel function of the second kind or its derivative. 
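As a cross-check tying together spherical_jn and spherical_yn defined above, the pair satisfies the Wronskian identity j_n(z) y_n'(z) - j_n'(z) y_n(z) = 1/z^2 (DLMF 10.50.E1); a hedged sketch with arbitrary sample values:

import numpy as np
from scipy.special import spherical_jn, spherical_yn

n, z = 4, 0.9  # arbitrary sample point
w = (spherical_jn(n, z) * spherical_yn(n, z, derivative=True)
     - spherical_jn(n, z, derivative=True) * spherical_yn(n, z))
assert np.isclose(w, 1 / z**2)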
+ + Defined as [1]_, + + .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z), + + where :math:`K_n` is the modified Bessel function of the second kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + kn : ndarray + + Notes + ----- + The function is computed using its definitional relation to the + modified cylindrical Bessel function of the second kind. + + The derivative is computed using the relations [2]_, + + .. math:: + k_n' = -k_{n-1} - \frac{n + 1}{z} k_n. + + k_0' = -k_1 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E9 + .. [2] https://dlmf.nist.gov/10.51.E5 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The modified spherical Bessel functions of the second kind :math:`k_n` + accept both real and complex second argument. + They can return a complex type: + + >>> from scipy.special import spherical_kn + >>> spherical_kn(0, 3+5j) + (0.012985785614001561+0.003354691603137546j) + >>> type(spherical_kn(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_kn(3, x, True), + ... - 4/x * spherical_kn(3, x) - spherical_kn(2, x)) + True + + The first few :math:`k_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 4.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(0.0, 5.0) + >>> ax.set_title(r'Modified spherical Bessel functions $k_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_kn(n, x), label=rf'$k_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_kn_d(n, z) + else: + return _spherical_kn(n, z) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_support_alternative_backends.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_support_alternative_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..1be09d29cc2f3b9283c4cf851133d5c74fcbcb17 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_support_alternative_backends.py @@ -0,0 +1,148 @@ +import os +import sys +import functools + +import numpy as np +import scipy +from scipy._lib._array_api import ( + array_namespace, scipy_namespace_for, is_numpy, is_torch +) +from . 
import _ufuncs +# These don't really need to be imported, but otherwise IDEs might not realize +# that these are defined in this file / report an error in __init__.py +from ._ufuncs import ( + log_ndtr, ndtr, ndtri, erf, erfc, i0, i0e, i1, i1e, gammaln, # noqa: F401 + gammainc, gammaincc, logit, expit, entr, rel_entr, xlogy, # noqa: F401 + chdtrc # noqa: F401 +) + +_SCIPY_ARRAY_API = os.environ.get("SCIPY_ARRAY_API", False) +array_api_compat_prefix = "scipy._lib.array_api_compat" + + +def get_array_special_func(f_name, xp, n_array_args): + spx = scipy_namespace_for(xp) + f = None + if is_numpy(xp): + f = getattr(_ufuncs, f_name, None) + elif is_torch(xp): + f = getattr(xp.special, f_name, None) + elif spx is not scipy: + f = getattr(spx.special, f_name, None) + + if f is not None: + return f + + # if generic array-API implementation is available, use that; + # otherwise, fall back to NumPy/SciPy + if f_name in _generic_implementations: + _f = _generic_implementations[f_name](xp=xp, spx=spx) + if _f is not None: + return _f + + _f = getattr(_ufuncs, f_name, None) + def f(*args, _f=_f, _xp=xp, **kwargs): + array_args = args[:n_array_args] + other_args = args[n_array_args:] + array_args = [np.asarray(arg) for arg in array_args] + out = _f(*array_args, *other_args, **kwargs) + return _xp.asarray(out) + + return f + + +def _get_shape_dtype(*args, xp): + args = xp.broadcast_arrays(*args) + shape = args[0].shape + dtype = xp.result_type(*args) + if xp.isdtype(dtype, 'integral'): + dtype = xp.float64 + args = [xp.asarray(arg, dtype=dtype) for arg in args] + return args, shape, dtype + + +def _rel_entr(xp, spx): + def __rel_entr(x, y, *, xp=xp): + args, shape, dtype = _get_shape_dtype(x, y, xp=xp) + x, y = args + res = xp.full(x.shape, xp.inf, dtype=dtype) + res[(x == 0) & (y >= 0)] = xp.asarray(0, dtype=dtype) + i = (x > 0) & (y > 0) + res[i] = x[i] * (xp.log(x[i]) - xp.log(y[i])) + return res + return __rel_entr + + +def _xlogy(xp, spx): + def __xlogy(x, y, *, xp=xp): + with np.errstate(divide='ignore', invalid='ignore'): + temp = x * xp.log(y) + return xp.where(x == 0., xp.asarray(0., dtype=temp.dtype), temp) + return __xlogy + + +def _chdtrc(xp, spx): + # The difference between this and just using `gammaincc` + # defined by `get_array_special_func` is that if `gammaincc` + # isn't found, we don't want to use the SciPy version; we'll + # return None here and use the SciPy version of `chdtrc`.. 
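+    # The fallback below relies on the identity chdtrc(v, x) == gammaincc(v/2, x/2)
+    # for x >= 0: the chi-square survival function is a regularized upper
+    # incomplete gamma. A quick NumPy-backend sanity check of that identity,
+    # with arbitrary sample values:
+    #     >>> import numpy as np
+    #     >>> from scipy.special import chdtrc, gammaincc
+    #     >>> bool(np.isclose(chdtrc(4.0, 2.5), gammaincc(2.0, 1.25)))
+    #     True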
+ gammaincc = getattr(spx, 'gammaincc', None) # noqa: F811 + if gammaincc is None and hasattr(xp, 'special'): + gammaincc = getattr(xp.special, 'gammaincc', None) + if gammaincc is None: + return None + + def __chdtrc(v, x): + res = xp.where(x >= 0, gammaincc(v/2, x/2), 1) + i_nan = ((x == 0) & (v == 0)) | xp.isnan(x) | xp.isnan(v) + res = xp.where(i_nan, xp.nan, res) + return res + return __chdtrc + + +_generic_implementations = {'rel_entr': _rel_entr, + 'xlogy': _xlogy, + 'chdtrc': _chdtrc} + + +# functools.wraps doesn't work because: +# 'numpy.ufunc' object has no attribute '__module__' +def support_alternative_backends(f_name, n_array_args): + func = getattr(_ufuncs, f_name) + + @functools.wraps(func) + def wrapped(*args, **kwargs): + xp = array_namespace(*args[:n_array_args]) + f = get_array_special_func(f_name, xp, n_array_args) + return f(*args, **kwargs) + + return wrapped + + +array_special_func_map = { + 'log_ndtr': 1, + 'ndtr': 1, + 'ndtri': 1, + 'erf': 1, + 'erfc': 1, + 'i0': 1, + 'i0e': 1, + 'i1': 1, + 'i1e': 1, + 'gammaln': 1, + 'gammainc': 2, + 'gammaincc': 2, + 'logit': 1, + 'expit': 1, + 'entr': 1, + 'rel_entr': 2, + 'xlogy': 2, + 'chdtrc': 2, +} + +for f_name, n_array_args in array_special_func_map.items(): + f = (support_alternative_backends(f_name, n_array_args) if _SCIPY_ARRAY_API + else getattr(_ufuncs, f_name)) + sys.modules[__name__].__dict__[f_name] = f + +__all__ = list(array_special_func_map) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_test_internal.pyi b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_test_internal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0e209e366f0b37415159083434a053545bc78fae --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_test_internal.pyi @@ -0,0 +1,9 @@ +import numpy as np + +def have_fenv() -> bool: ... +def random_double(size: int) -> np.float64: ... +def test_add_round(size: int, mode: str): ... + +def _dd_exp(xhi: float, xlo: float) -> tuple[float, float]: ... +def _dd_log(xhi: float, xlo: float) -> tuple[float, float]: ... +def _dd_expm1(xhi: float, xlo: float) -> tuple[float, float]: ... 
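The lookup order implemented by get_array_special_func above can be summarized in a few lines. This is a condensed, illustrative sketch rather than the module's actual API; lookup_special and its wrapping behavior are assumptions for exposition:

import numpy as np
import scipy.special as sc

def lookup_special(f_name, xp):
    # hypothetical helper: prefer the array library's own special namespace
    # (e.g. torch.special.erf), else round-trip through NumPy and the ufunc
    f = getattr(getattr(xp, "special", None), f_name, None)
    if f is not None:
        return f
    scipy_f = getattr(sc, f_name)
    def wrapped(*args):
        return xp.asarray(scipy_f(*[np.asarray(a) for a in args]))
    return wrapped

erf = lookup_special("erf", np)  # NumPy has no `special` attribute -> wrapped ufunc
print(erf(np.array([0.0, 0.5])))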
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_testutils.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..68c1eb3611143b3d6a4b7c02ca492d3b5d03bbcc --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_testutils.py @@ -0,0 +1,321 @@ +import os +import functools +import operator +from scipy._lib import _pep440 + +import numpy as np +from numpy.testing import assert_ +import pytest + +import scipy.special as sc + +__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData'] + + +#------------------------------------------------------------------------------ +# Check if a module is present to be used in tests +#------------------------------------------------------------------------------ + +class MissingModule: + def __init__(self, name): + self.name = name + + +def check_version(module, min_ver): + if type(module) == MissingModule: + return pytest.mark.skip(reason=f"{module.name} is not installed") + return pytest.mark.skipif( + _pep440.parse(module.__version__) < _pep440.Version(min_ver), + reason=f"{module.__name__} version >= {min_ver} required" + ) + + +#------------------------------------------------------------------------------ +# Enable convergence and loss of precision warnings -- turn off one by one +#------------------------------------------------------------------------------ + +def with_special_errors(func): + """ + Enable special function errors (such as underflow, overflow, + loss of precision, etc.) + """ + @functools.wraps(func) + def wrapper(*a, **kw): + with sc.errstate(all='raise'): + res = func(*a, **kw) + return res + return wrapper + + +#------------------------------------------------------------------------------ +# Comparing function values at many data points at once, with helpful +# error reports +#------------------------------------------------------------------------------ + +def assert_func_equal(func, results, points, rtol=None, atol=None, + param_filter=None, knownfailure=None, + vectorized=True, dtype=None, nan_ok=False, + ignore_inf_sign=False, distinguish_nan_and_inf=True): + if hasattr(points, 'next'): + # it's a generator + points = list(points) + + points = np.asarray(points) + if points.ndim == 1: + points = points[:,None] + nparams = points.shape[1] + + if hasattr(results, '__name__'): + # function + data = points + result_columns = None + result_func = results + else: + # dataset + data = np.c_[points, results] + result_columns = list(range(nparams, data.shape[1])) + result_func = None + + fdata = FuncData(func, data, list(range(nparams)), + result_columns=result_columns, result_func=result_func, + rtol=rtol, atol=atol, param_filter=param_filter, + knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized, + ignore_inf_sign=ignore_inf_sign, + distinguish_nan_and_inf=distinguish_nan_and_inf) + fdata.check() + + +class FuncData: + """ + Data set for checking a special function. + + Parameters + ---------- + func : function + Function to test + data : numpy array + columnar data to use for testing + param_columns : int or tuple of ints + Columns indices in which the parameters to `func` lie. + Can be imaginary integers to indicate that the parameter + should be cast to complex. + result_columns : int or tuple of ints, optional + Column indices for expected results from `func`. 
+ result_func : callable, optional + Function to call to obtain results. + rtol : float, optional + Required relative tolerance. Default is 5*eps. + atol : float, optional + Required absolute tolerance. Default is 5*tiny. + param_filter : function, or tuple of functions/Nones, optional + Filter functions to exclude some parameter ranges. + If omitted, no filtering is done. + knownfailure : str, optional + Known failure error message to raise when the test is run. + If omitted, no exception is raised. + nan_ok : bool, optional + If nan is always an accepted result. + vectorized : bool, optional + Whether all functions passed in are vectorized. + ignore_inf_sign : bool, optional + Whether to ignore signs of infinities. + (Doesn't matter for complex-valued functions.) + distinguish_nan_and_inf : bool, optional + If True, treat numbers which contain nans or infs as + equal. Sets ignore_inf_sign to be True. + + """ + + def __init__(self, func, data, param_columns, result_columns=None, + result_func=None, rtol=None, atol=None, param_filter=None, + knownfailure=None, dataname=None, nan_ok=False, vectorized=True, + ignore_inf_sign=False, distinguish_nan_and_inf=True): + self.func = func + self.data = data + self.dataname = dataname + if not hasattr(param_columns, '__len__'): + param_columns = (param_columns,) + self.param_columns = tuple(param_columns) + if result_columns is not None: + if not hasattr(result_columns, '__len__'): + result_columns = (result_columns,) + self.result_columns = tuple(result_columns) + if result_func is not None: + message = "Only result_func or result_columns should be provided" + raise ValueError(message) + elif result_func is not None: + self.result_columns = None + else: + raise ValueError("Either result_func or result_columns should be provided") + self.result_func = result_func + self.rtol = rtol + self.atol = atol + if not hasattr(param_filter, '__len__'): + param_filter = (param_filter,) + self.param_filter = param_filter + self.knownfailure = knownfailure + self.nan_ok = nan_ok + self.vectorized = vectorized + self.ignore_inf_sign = ignore_inf_sign + self.distinguish_nan_and_inf = distinguish_nan_and_inf + if not self.distinguish_nan_and_inf: + self.ignore_inf_sign = True + + def get_tolerances(self, dtype): + if not np.issubdtype(dtype, np.inexact): + dtype = np.dtype(float) + info = np.finfo(dtype) + rtol, atol = self.rtol, self.atol + if rtol is None: + rtol = 5*info.eps + if atol is None: + atol = 5*info.tiny + return rtol, atol + + def check(self, data=None, dtype=None, dtypes=None): + """Check the special function against the data.""" + __tracebackhide__ = operator.methodcaller( + 'errisinstance', AssertionError + ) + + if self.knownfailure: + pytest.xfail(reason=self.knownfailure) + + if data is None: + data = self.data + + if dtype is None: + dtype = data.dtype + else: + data = data.astype(dtype) + + rtol, atol = self.get_tolerances(dtype) + + # Apply given filter functions + if self.param_filter: + param_mask = np.ones((data.shape[0],), np.bool_) + for j, filter in zip(self.param_columns, self.param_filter): + if filter: + param_mask &= list(filter(data[:,j])) + data = data[param_mask] + + # Pick parameters from the correct columns + params = [] + for idx, j in enumerate(self.param_columns): + if np.iscomplexobj(j): + j = int(j.imag) + params.append(data[:,j].astype(complex)) + elif dtypes and idx < len(dtypes): + params.append(data[:, j].astype(dtypes[idx])) + else: + params.append(data[:,j]) + + # Helper for evaluating results + def 
eval_func_at_params(func, skip_mask=None): + if self.vectorized: + got = func(*params) + else: + got = [] + for j in range(len(params[0])): + if skip_mask is not None and skip_mask[j]: + got.append(np.nan) + continue + got.append(func(*tuple([params[i][j] for i in range(len(params))]))) + got = np.asarray(got) + if not isinstance(got, tuple): + got = (got,) + return got + + # Evaluate function to be tested + got = eval_func_at_params(self.func) + + # Grab the correct results + if self.result_columns is not None: + # Correct results passed in with the data + wanted = tuple([data[:,icol] for icol in self.result_columns]) + else: + # Function producing correct results passed in + skip_mask = None + if self.nan_ok and len(got) == 1: + # Don't spend time evaluating what doesn't need to be evaluated + skip_mask = np.isnan(got[0]) + wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask) + + # Check the validity of each output returned + assert_(len(got) == len(wanted)) + + for output_num, (x, y) in enumerate(zip(got, wanted)): + if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign: + pinf_x = np.isinf(x) + pinf_y = np.isinf(y) + minf_x = np.isinf(x) + minf_y = np.isinf(y) + else: + pinf_x = np.isposinf(x) + pinf_y = np.isposinf(y) + minf_x = np.isneginf(x) + minf_y = np.isneginf(y) + nan_x = np.isnan(x) + nan_y = np.isnan(y) + + with np.errstate(all='ignore'): + abs_y = np.absolute(y) + abs_y[~np.isfinite(abs_y)] = 0 + diff = np.absolute(x - y) + diff[~np.isfinite(diff)] = 0 + + rdiff = diff / np.absolute(y) + rdiff[~np.isfinite(rdiff)] = 0 + + tol_mask = (diff <= atol + rtol*abs_y) + pinf_mask = (pinf_x == pinf_y) + minf_mask = (minf_x == minf_y) + + nan_mask = (nan_x == nan_y) + + bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask) + + point_count = bad_j.size + if self.nan_ok: + bad_j &= ~nan_x + bad_j &= ~nan_y + point_count -= (nan_x | nan_y).sum() + + if not self.distinguish_nan_and_inf and not self.nan_ok: + # If nan's are okay we've already covered all these cases + inf_x = np.isinf(x) + inf_y = np.isinf(y) + both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y) + bad_j &= ~both_nonfinite + point_count -= both_nonfinite.sum() + + if np.any(bad_j): + # Some bad results: inform what, where, and how bad + msg = [""] + msg.append("Max |adiff|: %g" % diff[bad_j].max()) + msg.append("Max |rdiff|: %g" % rdiff[bad_j].max()) + msg.append("Bad results (%d out of %d) for the following points " + "(in output %d):" + % (np.sum(bad_j), point_count, output_num,)) + for j in np.nonzero(bad_j)[0]: + j = int(j) + def fmt(x): + return '%30s' % np.array2string(x[j], precision=18) + a = " ".join(map(fmt, params)) + b = " ".join(map(fmt, got)) + c = " ".join(map(fmt, wanted)) + d = fmt(rdiff) + msg.append(f"{a} => {b} != {c} (rdiff {d})") + assert_(False, "\n".join(msg)) + + def __repr__(self): + """Pretty-printing, esp. 
for Nose output""" + if np.any(list(map(np.iscomplexobj, self.param_columns))): + is_complex = " (complex)" + else: + is_complex = "" + if self.dataname: + return "<Data for {}{}: {}>".format(self.func.__name__, is_complex, + os.path.basename(self.dataname)) + else: + return f"<Data for {self.func.__name__}{is_complex}>" diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3f0cf70149ae9e797c9e14ef8f51024ca5b9933f --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi @@ -0,0 +1,525 @@ +from typing import Any, Dict + +import numpy as np + +__all__ = [ + 'geterr', + 'seterr', + 'errstate', + 'agm', + 'airy', + 'airye', + 'bdtr', + 'bdtrc', + 'bdtri', + 'bdtrik', + 'bdtrin', + 'bei', + 'beip', + 'ber', + 'berp', + 'besselpoly', + 'beta', + 'betainc', + 'betaincc', + 'betainccinv', + 'betaincinv', + 'betaln', + 'binom', + 'boxcox', + 'boxcox1p', + 'btdtr', + 'btdtri', + 'btdtria', + 'btdtrib', + 'cbrt', + 'chdtr', + 'chdtrc', + 'chdtri', + 'chdtriv', + 'chndtr', + 'chndtridf', + 'chndtrinc', + 'chndtrix', + 'cosdg', + 'cosm1', + 'cotdg', + 'dawsn', + 'ellipe', + 'ellipeinc', + 'ellipj', + 'ellipk', + 'ellipkinc', + 'ellipkm1', + 'elliprc', + 'elliprd', + 'elliprf', + 'elliprg', + 'elliprj', + 'entr', + 'erf', + 'erfc', + 'erfcinv', + 'erfcx', + 'erfi', + 'erfinv', + 'eval_chebyc', + 'eval_chebys', + 'eval_chebyt', + 'eval_chebyu', + 'eval_gegenbauer', + 'eval_genlaguerre', + 'eval_hermite', + 'eval_hermitenorm', + 'eval_jacobi', + 'eval_laguerre', + 'eval_legendre', + 'eval_sh_chebyt', + 'eval_sh_chebyu', + 'eval_sh_jacobi', + 'eval_sh_legendre', + 'exp1', + 'exp10', + 'exp2', + 'expi', + 'expit', + 'expm1', + 'expn', + 'exprel', + 'fdtr', + 'fdtrc', + 'fdtri', + 'fdtridfd', + 'fresnel', + 'gamma', + 'gammainc', + 'gammaincc', + 'gammainccinv', + 'gammaincinv', + 'gammaln', + 'gammasgn', + 'gdtr', + 'gdtrc', + 'gdtria', + 'gdtrib', + 'gdtrix', + 'hankel1', + 'hankel1e', + 'hankel2', + 'hankel2e', + 'huber', + 'hyp0f1', + 'hyp1f1', + 'hyp2f1', + 'hyperu', + 'i0', + 'i0e', + 'i1', + 'i1e', + 'inv_boxcox', + 'inv_boxcox1p', + 'it2i0k0', + 'it2j0y0', + 'it2struve0', + 'itairy', + 'iti0k0', + 'itj0y0', + 'itmodstruve0', + 'itstruve0', + 'iv', + 'ive', + 'j0', + 'j1', + 'jn', + 'jv', + 'jve', + 'k0', + 'k0e', + 'k1', + 'k1e', + 'kei', + 'keip', + 'kelvin', + 'ker', + 'kerp', + 'kl_div', + 'kn', + 'kolmogi', + 'kolmogorov', + 'kv', + 'kve', + 'log1p', + 'log_expit', + 'log_ndtr', + 'log_wright_bessel', + 'loggamma', + 'logit', + 'lpmv', + 'mathieu_a', + 'mathieu_b', + 'mathieu_cem', + 'mathieu_modcem1', + 'mathieu_modcem2', + 'mathieu_modsem1', + 'mathieu_modsem2', + 'mathieu_sem', + 'modfresnelm', + 'modfresnelp', + 'modstruve', + 'nbdtr', + 'nbdtrc', + 'nbdtri', + 'nbdtrik', + 'nbdtrin', + 'ncfdtr', + 'ncfdtri', + 'ncfdtridfd', + 'ncfdtridfn', + 'ncfdtrinc', + 'nctdtr', + 'nctdtridf', + 'nctdtrinc', + 'nctdtrit', + 'ndtr', + 'ndtri', + 'ndtri_exp', + 'nrdtrimn', + 'nrdtrisd', + 'obl_ang1', + 'obl_ang1_cv', + 'obl_cv', + 'obl_rad1', + 'obl_rad1_cv', + 'obl_rad2', + 'obl_rad2_cv', + 'owens_t', + 'pbdv', + 'pbvv', + 'pbwa', + 'pdtr', + 'pdtrc', + 'pdtri', + 'pdtrik', + 'poch', + 'powm1', + 'pro_ang1', + 'pro_ang1_cv', + 'pro_cv', + 'pro_rad1', + 'pro_rad1_cv', + 'pro_rad2', + 'pro_rad2_cv', + 'pseudo_huber', + 'psi', + 'radian', + 'rel_entr', + 'rgamma', +
'round', + 'shichi', + 'sici', + 'sindg', + 'smirnov', + 'smirnovi', + 'spence', + 'sph_harm', + 'stdtr', + 'stdtridf', + 'stdtrit', + 'struve', + 'tandg', + 'tklmbda', + 'voigt_profile', + 'wofz', + 'wright_bessel', + 'wrightomega', + 'xlog1py', + 'xlogy', + 'y0', + 'y1', + 'yn', + 'yv', + 'yve', + 'zetac' +] + +def geterr() -> Dict[str, str]: ... +def seterr(**kwargs: str) -> Dict[str, str]: ... + +class errstate: + def __init__(self, **kargs: str) -> None: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: Any, # Unused + exc_value: Any, # Unused + traceback: Any, # Unused + ) -> None: ... + +_cosine_cdf: np.ufunc +_cosine_invcdf: np.ufunc +_cospi: np.ufunc +_ellip_harm: np.ufunc +_factorial: np.ufunc +_igam_fac: np.ufunc +_kolmogc: np.ufunc +_kolmogci: np.ufunc +_kolmogp: np.ufunc +_lambertw: np.ufunc +_lanczos_sum_expg_scaled: np.ufunc +_lgam1p: np.ufunc +_log1pmx: np.ufunc +_riemann_zeta: np.ufunc +_scaled_exp1: np.ufunc +_sf_error_test_function: np.ufunc +_sinpi: np.ufunc +_smirnovc: np.ufunc +_smirnovci: np.ufunc +_smirnovp: np.ufunc +_spherical_in: np.ufunc +_spherical_in_d: np.ufunc +_spherical_jn: np.ufunc +_spherical_jn_d: np.ufunc +_spherical_kn: np.ufunc +_spherical_kn_d: np.ufunc +_spherical_yn: np.ufunc +_spherical_yn_d: np.ufunc +_stirling2_inexact: np.ufunc +_struve_asymp_large_z: np.ufunc +_struve_bessel_series: np.ufunc +_struve_power_series: np.ufunc +_zeta: np.ufunc +agm: np.ufunc +airy: np.ufunc +airye: np.ufunc +bdtr: np.ufunc +bdtrc: np.ufunc +bdtri: np.ufunc +bdtrik: np.ufunc +bdtrin: np.ufunc +bei: np.ufunc +beip: np.ufunc +ber: np.ufunc +berp: np.ufunc +besselpoly: np.ufunc +beta: np.ufunc +betainc: np.ufunc +betaincc: np.ufunc +betainccinv: np.ufunc +betaincinv: np.ufunc +betaln: np.ufunc +binom: np.ufunc +boxcox1p: np.ufunc +boxcox: np.ufunc +btdtr: np.ufunc +btdtri: np.ufunc +btdtria: np.ufunc +btdtrib: np.ufunc +cbrt: np.ufunc +chdtr: np.ufunc +chdtrc: np.ufunc +chdtri: np.ufunc +chdtriv: np.ufunc +chndtr: np.ufunc +chndtridf: np.ufunc +chndtrinc: np.ufunc +chndtrix: np.ufunc +cosdg: np.ufunc +cosm1: np.ufunc +cotdg: np.ufunc +dawsn: np.ufunc +ellipe: np.ufunc +ellipeinc: np.ufunc +ellipj: np.ufunc +ellipk: np.ufunc +ellipkinc: np.ufunc +ellipkm1: np.ufunc +elliprc: np.ufunc +elliprd: np.ufunc +elliprf: np.ufunc +elliprg: np.ufunc +elliprj: np.ufunc +entr: np.ufunc +erf: np.ufunc +erfc: np.ufunc +erfcinv: np.ufunc +erfcx: np.ufunc +erfi: np.ufunc +erfinv: np.ufunc +eval_chebyc: np.ufunc +eval_chebys: np.ufunc +eval_chebyt: np.ufunc +eval_chebyu: np.ufunc +eval_gegenbauer: np.ufunc +eval_genlaguerre: np.ufunc +eval_hermite: np.ufunc +eval_hermitenorm: np.ufunc +eval_jacobi: np.ufunc +eval_laguerre: np.ufunc +eval_legendre: np.ufunc +eval_sh_chebyt: np.ufunc +eval_sh_chebyu: np.ufunc +eval_sh_jacobi: np.ufunc +eval_sh_legendre: np.ufunc +exp10: np.ufunc +exp1: np.ufunc +exp2: np.ufunc +expi: np.ufunc +expit: np.ufunc +expm1: np.ufunc +expn: np.ufunc +exprel: np.ufunc +fdtr: np.ufunc +fdtrc: np.ufunc +fdtri: np.ufunc +fdtridfd: np.ufunc +fresnel: np.ufunc +gamma: np.ufunc +gammainc: np.ufunc +gammaincc: np.ufunc +gammainccinv: np.ufunc +gammaincinv: np.ufunc +gammaln: np.ufunc +gammasgn: np.ufunc +gdtr: np.ufunc +gdtrc: np.ufunc +gdtria: np.ufunc +gdtrib: np.ufunc +gdtrix: np.ufunc +hankel1: np.ufunc +hankel1e: np.ufunc +hankel2: np.ufunc +hankel2e: np.ufunc +huber: np.ufunc +hyp0f1: np.ufunc +hyp1f1: np.ufunc +hyp2f1: np.ufunc +hyperu: np.ufunc +i0: np.ufunc +i0e: np.ufunc +i1: np.ufunc +i1e: np.ufunc +inv_boxcox1p: np.ufunc 
+inv_boxcox: np.ufunc +it2i0k0: np.ufunc +it2j0y0: np.ufunc +it2struve0: np.ufunc +itairy: np.ufunc +iti0k0: np.ufunc +itj0y0: np.ufunc +itmodstruve0: np.ufunc +itstruve0: np.ufunc +iv: np.ufunc +ive: np.ufunc +j0: np.ufunc +j1: np.ufunc +jn: np.ufunc +jv: np.ufunc +jve: np.ufunc +k0: np.ufunc +k0e: np.ufunc +k1: np.ufunc +k1e: np.ufunc +kei: np.ufunc +keip: np.ufunc +kelvin: np.ufunc +ker: np.ufunc +kerp: np.ufunc +kl_div: np.ufunc +kn: np.ufunc +kolmogi: np.ufunc +kolmogorov: np.ufunc +kv: np.ufunc +kve: np.ufunc +log1p: np.ufunc +log_expit: np.ufunc +log_ndtr: np.ufunc +log_wright_bessel: np.ufunc +loggamma: np.ufunc +logit: np.ufunc +lpmv: np.ufunc +mathieu_a: np.ufunc +mathieu_b: np.ufunc +mathieu_cem: np.ufunc +mathieu_modcem1: np.ufunc +mathieu_modcem2: np.ufunc +mathieu_modsem1: np.ufunc +mathieu_modsem2: np.ufunc +mathieu_sem: np.ufunc +modfresnelm: np.ufunc +modfresnelp: np.ufunc +modstruve: np.ufunc +nbdtr: np.ufunc +nbdtrc: np.ufunc +nbdtri: np.ufunc +nbdtrik: np.ufunc +nbdtrin: np.ufunc +ncfdtr: np.ufunc +ncfdtri: np.ufunc +ncfdtridfd: np.ufunc +ncfdtridfn: np.ufunc +ncfdtrinc: np.ufunc +nctdtr: np.ufunc +nctdtridf: np.ufunc +nctdtrinc: np.ufunc +nctdtrit: np.ufunc +ndtr: np.ufunc +ndtri: np.ufunc +ndtri_exp: np.ufunc +nrdtrimn: np.ufunc +nrdtrisd: np.ufunc +obl_ang1: np.ufunc +obl_ang1_cv: np.ufunc +obl_cv: np.ufunc +obl_rad1: np.ufunc +obl_rad1_cv: np.ufunc +obl_rad2: np.ufunc +obl_rad2_cv: np.ufunc +owens_t: np.ufunc +pbdv: np.ufunc +pbvv: np.ufunc +pbwa: np.ufunc +pdtr: np.ufunc +pdtrc: np.ufunc +pdtri: np.ufunc +pdtrik: np.ufunc +poch: np.ufunc +powm1: np.ufunc +pro_ang1: np.ufunc +pro_ang1_cv: np.ufunc +pro_cv: np.ufunc +pro_rad1: np.ufunc +pro_rad1_cv: np.ufunc +pro_rad2: np.ufunc +pro_rad2_cv: np.ufunc +pseudo_huber: np.ufunc +psi: np.ufunc +radian: np.ufunc +rel_entr: np.ufunc +rgamma: np.ufunc +round: np.ufunc +shichi: np.ufunc +sici: np.ufunc +sindg: np.ufunc +smirnov: np.ufunc +smirnovi: np.ufunc +spence: np.ufunc +sph_harm: np.ufunc +stdtr: np.ufunc +stdtridf: np.ufunc +stdtrit: np.ufunc +struve: np.ufunc +tandg: np.ufunc +tklmbda: np.ufunc +voigt_profile: np.ufunc +wofz: np.ufunc +wright_bessel: np.ufunc +wrightomega: np.ufunc +xlog1py: np.ufunc +xlogy: np.ufunc +y0: np.ufunc +y1: np.ufunc +yn: np.ufunc +yv: np.ufunc +yve: np.ufunc +zetac: np.ufunc + diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyx b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..bdf10e7500b9b1757a0e160def9ba4de446b5471 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyx @@ -0,0 +1,17266 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! 
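Every generated inner loop below follows the same stride-walking pattern: pull the kernel pointer out of the data tuple, read each input through its stride, call the scalar kernel, store the result, advance all pointers, and check the FPU state once at the end. A rough pure-Python analogue of loop_d_d__As_d_d (illustrative only; strides here are in elements, while the real code walks raw char buffers under nogil):

import math
import numpy as np

def loop_d_d(kernel, ip0, op0, steps, n):
    # one double in, one double out, each buffer advanced by its own stride
    i0 = o0 = 0
    for _ in range(n):
        op0[o0] = kernel(ip0[i0])
        i0 += steps[0]
        o0 += steps[1]

x = np.linspace(0.0, 2.0, 5)
out = np.empty_like(x)
loop_d_d(math.sqrt, x, out, steps=(1, 1), n=x.size)
print(out)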
+ +from libc.math cimport NAN + +include "_ufuncs_extra_code_common.pxi" +include "_ufuncs_extra_code.pxi" +__all__ = ['agm', 'bdtr', 'bdtrc', 'bdtri', 'bdtrik', 'bdtrin', 'besselpoly', 'beta', 'betainc', 'betaincc', 'betainccinv', 'betaincinv', 'betaln', 'boxcox', 'boxcox1p', 'btdtr', 'btdtri', 'btdtria', 'btdtrib', 'cbrt', 'chdtr', 'chdtrc', 'chdtri', 'chdtriv', 'chndtr', 'chndtridf', 'chndtrinc', 'chndtrix', 'cosdg', 'cosm1', 'cotdg', 'dawsn', 'ellipe', 'ellipeinc', 'ellipj', 'ellipk', 'ellipkinc', 'ellipkm1', 'elliprc', 'elliprd', 'elliprf', 'elliprg', 'elliprj', 'entr', 'erf', 'erfc', 'erfcinv', 'erfcx', 'erfi', 'erfinv', 'eval_chebyc', 'eval_chebys', 'eval_chebyt', 'eval_chebyu', 'eval_gegenbauer', 'eval_genlaguerre', 'eval_hermite', 'eval_hermitenorm', 'eval_jacobi', 'eval_laguerre', 'eval_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu', 'eval_sh_jacobi', 'eval_sh_legendre', 'exp10', 'exp2', 'expm1', 'expn', 'fdtr', 'fdtrc', 'fdtri', 'fdtridfd', 'fresnel', 'gammainc', 'gammaincc', 'gammainccinv', 'gammaincinv', 'gammasgn', 'gdtr', 'gdtrc', 'gdtria', 'gdtrib', 'gdtrix', 'huber', 'hyp0f1', 'hyp1f1', 'hyperu', 'i0', 'i0e', 'i1', 'i1e', 'inv_boxcox', 'inv_boxcox1p', 'j0', 'j1', 'k0', 'k0e', 'k1', 'k1e', 'kl_div', 'kn', 'kolmogi', 'kolmogorov', 'log1p', 'log_ndtr', 'lpmv', 'modstruve', 'nbdtr', 'nbdtrc', 'nbdtri', 'nbdtrik', 'nbdtrin', 'ncfdtr', 'ncfdtri', 'ncfdtridfd', 'ncfdtridfn', 'ncfdtrinc', 'nctdtr', 'nctdtridf', 'nctdtrinc', 'nctdtrit', 'ndtr', 'ndtri', 'ndtri_exp', 'nrdtrimn', 'nrdtrisd', 'owens_t', 'pdtr', 'pdtrc', 'pdtri', 'pdtrik', 'poch', 'powm1', 'pseudo_huber', 'radian', 'rel_entr', 'round', 'shichi', 'sici', 'sindg', 'smirnov', 'smirnovi', 'spence', 'stdtr', 'stdtridf', 'stdtrit', 'struve', 'tandg', 'tklmbda', 'voigt_profile', 'wofz', 'wrightomega', 'xlog1py', 'xlogy', 'y0', 'y1', 'yn', 'zetac', 'geterr', 'seterr', 'errstate', 'jn', 'airy', 'airye', 'bei', 'beip', 'ber', 'berp', 'binom', 'exp1', 'expi', 'expit', 'exprel', 'gamma', 'gammaln', 'hankel1', 'hankel1e', 'hankel2', 'hankel2e', 'hyp2f1', 'it2i0k0', 'it2j0y0', 'it2struve0', 'itairy', 'iti0k0', 'itj0y0', 'itmodstruve0', 'itstruve0', 'iv', 'ive', 'jv', 'jve', 'kei', 'keip', 'kelvin', 'ker', 'kerp', 'kv', 'kve', 'log_expit', 'log_wright_bessel', 'loggamma', 'logit', 'mathieu_a', 'mathieu_b', 'mathieu_cem', 'mathieu_modcem1', 'mathieu_modcem2', 'mathieu_modsem1', 'mathieu_modsem2', 'mathieu_sem', 'modfresnelm', 'modfresnelp', 'obl_ang1', 'obl_ang1_cv', 'obl_cv', 'obl_rad1', 'obl_rad1_cv', 'obl_rad2', 'obl_rad2_cv', 'pbdv', 'pbvv', 'pbwa', 'pro_ang1', 'pro_ang1_cv', 'pro_cv', 'pro_rad1', 'pro_rad1_cv', 'pro_rad2', 'pro_rad2_cv', 'psi', 'rgamma', 'sph_harm', 'wright_bessel', 'yv', 'yve'] +cdef void loop_D_DDDD__As_DDDD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_DDDD__As_FFFF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = 
args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_DDD__As_DDD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_DDD__As_FFF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_DD__As_DD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_DD__As_FF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_D__As_D_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_D_D__As_F_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_D_dD__As_dD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = 
(func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_dD__As_fF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_ddD__As_ddD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_ddD__As_ffF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_dddD__As_dddD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_dddD__As_fffF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_d__As_d_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_d_d__As_f_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double ov0 + 
for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_d_dd__As_dd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_dd__As_ff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddd__As_ddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddd__As_fff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_dddd__As_dddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_dddd__As_ffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddddddd__As_ddddddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = 
args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *ip5 = args[5] + cdef char *ip6 = args[6] + cdef char *op0 = args[7] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], (ip5)[0], (ip6)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + ip5 += steps[5] + ip6 += steps[6] + op0 += steps[7] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddddddd__As_fffffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *ip5 = args[5] + cdef char *ip6 = args[6] + cdef char *op0 = args[7] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], (ip5)[0], (ip6)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + ip5 += steps[5] + ip6 += steps[6] + op0 += steps[7] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddiiddd__As_ddllddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *ip5 = args[5] + cdef char *ip6 = args[6] + cdef char *op0 = args[7] + cdef double ov0 + for i in range(n): + if (ip2)[0] == (ip2)[0] and (ip3)[0] == (ip3)[0]: + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], (ip5)[0], (ip6)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + ip5 += steps[5] + ip6 += steps[6] + op0 += steps[7] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddp_d_As_ddp_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef char *op1 = args[4] + cdef double ov0 + cdef double ov1 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + op1 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_dpd__As_dpd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_pd__As_pd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char 
*op0 = args[2] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_pdd__As_pdd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_pddd__As_pddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_ppd__As_ppd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_f_f__As_f_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_f_ff__As_ff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_f_fff__As_fff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_f_ffff__As_ffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] 
+ cdef char *op0 = args[4] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_D_DD_As_D_DD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double complex ov0 + cdef double complex ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_D_DD_As_F_FF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double complex ov0 + cdef double complex ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_dd_As_d_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_dd_As_f_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_dd_dddd_As_dd_dddd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef char *op1 = args[3] + cdef char *op2 = args[4] + cdef char *op3 = args[5] + cdef double ov0 + cdef double ov1 + cdef double ov2 + cdef double ov3 + for i in range(n): + (func)((ip0)[0], (ip1)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + op1 += steps[3] + op2 += steps[4] + op3 += steps[5] + sf_error.check_fpe(func_name) + +cdef void loop_i_dd_dddd_As_ff_ffff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef char *op1 = args[3] + cdef char *op2 = args[4] + cdef char *op3 = args[5] + cdef double ov0 + cdef double ov1 + cdef double ov2 + cdef double ov3 + for i in range(n): + (func)((ip0)[0], (ip1)[0], &ov0, &ov1, &ov2, &ov3) 
+ (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + op1 += steps[3] + op2 += steps[4] + op3 += steps[5] + sf_error.check_fpe(func_name) + +cdef void loop_i_i__As_l_l(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef int ov0 + for i in range(n): + if (ip0)[0] == (ip0)[0]: + ov0 = (func)((ip0)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = 0xbad0bad0 + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cosine_cdf "cosine_cdf"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cosine_invcdf "cosine_invcdf"(double) noexcept nogil +from ._ellip_harm cimport ellip_harmonic as _func_ellip_harmonic +ctypedef double _proto_ellip_harmonic_t(double, double, int, int, double, double, double) noexcept nogil +cdef _proto_ellip_harmonic_t *_proto_ellip_harmonic_t_var = &_func_ellip_harmonic +from ._legacy cimport ellip_harmonic_unsafe as _func_ellip_harmonic_unsafe +ctypedef double _proto_ellip_harmonic_unsafe_t(double, double, double, double, double, double, double) noexcept nogil +cdef _proto_ellip_harmonic_unsafe_t *_proto_ellip_harmonic_unsafe_t_var = &_func_ellip_harmonic_unsafe +from ._factorial cimport _factorial as _func__factorial +ctypedef double _proto__factorial_t(double) noexcept nogil +cdef _proto__factorial_t *_proto__factorial_t_var = &_func__factorial +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_igam_fac "cephes_igam_fac"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_kolmogc "cephes_kolmogc"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_kolmogci "cephes_kolmogci"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_kolmogp "cephes_kolmogp"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_lanczos_sum_expg_scaled "cephes_lanczos_sum_expg_scaled"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_lgam1p "cephes_lgam1p"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_log1pmx "cephes_log1pmx"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_riemann_zeta "cephes_riemann_zeta"(double) noexcept nogil +from .sf_error cimport _sf_error_test_function as _func__sf_error_test_function +ctypedef int _proto__sf_error_test_function_t(int) noexcept nogil +cdef _proto__sf_error_test_function_t *_proto__sf_error_test_function_t_var = &_func__sf_error_test_function +from ._legacy cimport smirnovc_unsafe as _func_smirnovc_unsafe +ctypedef double _proto_smirnovc_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovc_unsafe_t *_proto_smirnovc_unsafe_t_var = &_func_smirnovc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_smirnovc_wrap "cephes_smirnovc_wrap"(Py_ssize_t, double) noexcept nogil +from ._legacy cimport smirnovci_unsafe as _func_smirnovci_unsafe +ctypedef double _proto_smirnovci_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovci_unsafe_t *_proto_smirnovci_unsafe_t_var = &_func_smirnovci_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double 
_func_cephes_smirnovci_wrap "cephes_smirnovci_wrap"(Py_ssize_t, double) noexcept nogil +from ._legacy cimport smirnovp_unsafe as _func_smirnovp_unsafe +ctypedef double _proto_smirnovp_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovp_unsafe_t *_proto_smirnovp_unsafe_t_var = &_func_smirnovp_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_smirnovp_wrap "cephes_smirnovp_wrap"(Py_ssize_t, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes__struve_asymp_large_z "cephes__struve_asymp_large_z"(double, double, Py_ssize_t, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes__struve_bessel_series "cephes__struve_bessel_series"(double, double, Py_ssize_t, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes__struve_power_series "cephes__struve_power_series"(double, double, Py_ssize_t, double *) noexcept nogil +from ._agm cimport agm as _func_agm +ctypedef double _proto_agm_t(double, double) noexcept nogil +cdef _proto_agm_t *_proto_agm_t_var = &_func_agm +from ._legacy cimport bdtr_unsafe as _func_bdtr_unsafe +ctypedef double _proto_bdtr_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtr_unsafe_t *_proto_bdtr_unsafe_t_var = &_func_bdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_bdtr_wrap "cephes_bdtr_wrap"(double, Py_ssize_t, double) noexcept nogil +from ._legacy cimport bdtrc_unsafe as _func_bdtrc_unsafe +ctypedef double _proto_bdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtrc_unsafe_t *_proto_bdtrc_unsafe_t_var = &_func_bdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_bdtrc_wrap "cephes_bdtrc_wrap"(double, Py_ssize_t, double) noexcept nogil +from ._legacy cimport bdtri_unsafe as _func_bdtri_unsafe +ctypedef double _proto_bdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtri_unsafe_t *_proto_bdtri_unsafe_t_var = &_func_bdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_bdtri_wrap "cephes_bdtri_wrap"(double, Py_ssize_t, double) noexcept nogil +from ._cdflib_wrappers cimport bdtrik as _func_bdtrik +ctypedef double _proto_bdtrik_t(double, double, double) noexcept nogil +cdef _proto_bdtrik_t *_proto_bdtrik_t_var = &_func_bdtrik +from ._cdflib_wrappers cimport bdtrin as _func_bdtrin +ctypedef double _proto_bdtrin_t(double, double, double) noexcept nogil +cdef _proto_bdtrin_t *_proto_bdtrin_t_var = &_func_bdtrin +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_besselpoly "cephes_besselpoly"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_beta "cephes_beta"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_lbeta "cephes_lbeta"(double, double) noexcept nogil +from ._boxcox cimport boxcox as _func_boxcox +ctypedef double _proto_boxcox_t(double, double) noexcept nogil +cdef _proto_boxcox_t *_proto_boxcox_t_var = &_func_boxcox +from ._boxcox cimport boxcox1p as _func_boxcox1p +ctypedef double _proto_boxcox1p_t(double, double) noexcept nogil +cdef _proto_boxcox1p_t *_proto_boxcox1p_t_var = &_func_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_btdtr "cephes_btdtr"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_btdtri "cephes_btdtri"(double, double, double) noexcept nogil +from ._cdflib_wrappers cimport btdtria as _func_btdtria +ctypedef 
double _proto_btdtria_t(double, double, double) noexcept nogil +cdef _proto_btdtria_t *_proto_btdtria_t_var = &_func_btdtria +from ._cdflib_wrappers cimport btdtrib as _func_btdtrib +ctypedef double _proto_btdtrib_t(double, double, double) noexcept nogil +cdef _proto_btdtrib_t *_proto_btdtrib_t_var = &_func_btdtrib +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_cbrt "cephes_cbrt"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_chdtr "cephes_chdtr"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_chdtrc "cephes_chdtrc"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_chdtri "cephes_chdtri"(double, double) noexcept nogil +from ._cdflib_wrappers cimport chdtriv as _func_chdtriv +ctypedef double _proto_chdtriv_t(double, double) noexcept nogil +cdef _proto_chdtriv_t *_proto_chdtriv_t_var = &_func_chdtriv +from ._cdflib_wrappers cimport chndtr as _func_chndtr +ctypedef double _proto_chndtr_t(double, double, double) noexcept nogil +cdef _proto_chndtr_t *_proto_chndtr_t_var = &_func_chndtr +from ._cdflib_wrappers cimport chndtridf as _func_chndtridf +ctypedef double _proto_chndtridf_t(double, double, double) noexcept nogil +cdef _proto_chndtridf_t *_proto_chndtridf_t_var = &_func_chndtridf +from ._cdflib_wrappers cimport chndtrinc as _func_chndtrinc +ctypedef double _proto_chndtrinc_t(double, double, double) noexcept nogil +cdef _proto_chndtrinc_t *_proto_chndtrinc_t_var = &_func_chndtrinc +from ._cdflib_wrappers cimport chndtrix as _func_chndtrix +ctypedef double _proto_chndtrix_t(double, double, double) noexcept nogil +cdef _proto_chndtrix_t *_proto_chndtrix_t_var = &_func_chndtrix +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_cosdg "cephes_cosdg"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_cosm1 "cephes_cosm1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_cotdg "cephes_cotdg"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_ellpe "cephes_ellpe"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_ellie "cephes_ellie"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cephes_ellpj_wrap "cephes_ellpj_wrap"(double, double, double *, double *, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_special_ellipk "special_ellipk"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_ellik "cephes_ellik"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_ellpk "cephes_ellpk"(double) noexcept nogil +from ._convex_analysis cimport entr as _func_entr +ctypedef double _proto_entr_t(double) noexcept nogil +cdef _proto_entr_t *_proto_entr_t_var = &_func_entr +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_erf "cephes_erf"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_erfc "cephes_erfc"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_erfcinv "cephes_erfcinv"(double) noexcept nogil +from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double complex _proto_eval_chebyc_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyc_double_complex__t *_proto_eval_chebyc_double_complex__t_var = 
&_func_eval_chebyc[double_complex] +from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double _proto_eval_chebyc_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyc_double__t *_proto_eval_chebyc_double__t_var = &_func_eval_chebyc[double] +from .orthogonal_eval cimport eval_chebyc_l as _func_eval_chebyc_l +ctypedef double _proto_eval_chebyc_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_chebyc_l_t *_proto_eval_chebyc_l_t_var = &_func_eval_chebyc_l +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double complex _proto_eval_chebys_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebys_double_complex__t *_proto_eval_chebys_double_complex__t_var = &_func_eval_chebys[double_complex] +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double _proto_eval_chebys_double__t(double, double) noexcept nogil +cdef _proto_eval_chebys_double__t *_proto_eval_chebys_double__t_var = &_func_eval_chebys[double] +from .orthogonal_eval cimport eval_chebys_l as _func_eval_chebys_l +ctypedef double _proto_eval_chebys_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_chebys_l_t *_proto_eval_chebys_l_t_var = &_func_eval_chebys_l +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double complex _proto_eval_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyt_double_complex__t *_proto_eval_chebyt_double_complex__t_var = &_func_eval_chebyt[double_complex] +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double _proto_eval_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyt_double__t *_proto_eval_chebyt_double__t_var = &_func_eval_chebyt[double] +from .orthogonal_eval cimport eval_chebyt_l as _func_eval_chebyt_l +ctypedef double _proto_eval_chebyt_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_chebyt_l_t *_proto_eval_chebyt_l_t_var = &_func_eval_chebyt_l +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu +ctypedef double complex _proto_eval_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyu_double_complex__t *_proto_eval_chebyu_double_complex__t_var = &_func_eval_chebyu[double_complex] +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu +ctypedef double _proto_eval_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyu_double__t *_proto_eval_chebyu_double__t_var = &_func_eval_chebyu[double] +from .orthogonal_eval cimport eval_chebyu_l as _func_eval_chebyu_l +ctypedef double _proto_eval_chebyu_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_chebyu_l_t *_proto_eval_chebyu_l_t_var = &_func_eval_chebyu_l +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double complex _proto_eval_gegenbauer_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_gegenbauer_double_complex__t *_proto_eval_gegenbauer_double_complex__t_var = &_func_eval_gegenbauer[double_complex] +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double _proto_eval_gegenbauer_double__t(double, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_double__t *_proto_eval_gegenbauer_double__t_var = &_func_eval_gegenbauer[double] +from .orthogonal_eval cimport eval_gegenbauer_l as _func_eval_gegenbauer_l +ctypedef double _proto_eval_gegenbauer_l_t(Py_ssize_t, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_l_t 
*_proto_eval_gegenbauer_l_t_var = &_func_eval_gegenbauer_l +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double complex _proto_eval_genlaguerre_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_genlaguerre_double_complex__t *_proto_eval_genlaguerre_double_complex__t_var = &_func_eval_genlaguerre[double_complex] +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double _proto_eval_genlaguerre_double__t(double, double, double) noexcept nogil +cdef _proto_eval_genlaguerre_double__t *_proto_eval_genlaguerre_double__t_var = &_func_eval_genlaguerre[double] +from .orthogonal_eval cimport eval_genlaguerre_l as _func_eval_genlaguerre_l +ctypedef double _proto_eval_genlaguerre_l_t(Py_ssize_t, double, double) noexcept nogil +cdef _proto_eval_genlaguerre_l_t *_proto_eval_genlaguerre_l_t_var = &_func_eval_genlaguerre_l +from .orthogonal_eval cimport eval_hermite as _func_eval_hermite +ctypedef double _proto_eval_hermite_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_hermite_t *_proto_eval_hermite_t_var = &_func_eval_hermite +from .orthogonal_eval cimport eval_hermitenorm as _func_eval_hermitenorm +ctypedef double _proto_eval_hermitenorm_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_hermitenorm_t *_proto_eval_hermitenorm_t_var = &_func_eval_hermitenorm +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double complex _proto_eval_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_jacobi_double_complex__t *_proto_eval_jacobi_double_complex__t_var = &_func_eval_jacobi[double_complex] +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double _proto_eval_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_double__t *_proto_eval_jacobi_double__t_var = &_func_eval_jacobi[double] +from .orthogonal_eval cimport eval_jacobi_l as _func_eval_jacobi_l +ctypedef double _proto_eval_jacobi_l_t(Py_ssize_t, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_l_t *_proto_eval_jacobi_l_t_var = &_func_eval_jacobi_l +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef double complex _proto_eval_laguerre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_laguerre_double_complex__t *_proto_eval_laguerre_double_complex__t_var = &_func_eval_laguerre[double_complex] +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef double _proto_eval_laguerre_double__t(double, double) noexcept nogil +cdef _proto_eval_laguerre_double__t *_proto_eval_laguerre_double__t_var = &_func_eval_laguerre[double] +from .orthogonal_eval cimport eval_laguerre_l as _func_eval_laguerre_l +ctypedef double _proto_eval_laguerre_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_laguerre_l_t *_proto_eval_laguerre_l_t_var = &_func_eval_laguerre_l +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double complex _proto_eval_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_legendre_double_complex__t *_proto_eval_legendre_double_complex__t_var = &_func_eval_legendre[double_complex] +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double _proto_eval_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_legendre_double__t *_proto_eval_legendre_double__t_var = &_func_eval_legendre[double] +from .orthogonal_eval cimport 
eval_legendre_l as _func_eval_legendre_l +ctypedef double _proto_eval_legendre_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_legendre_l_t *_proto_eval_legendre_l_t_var = &_func_eval_legendre_l +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double complex _proto_eval_sh_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyt_double_complex__t *_proto_eval_sh_chebyt_double_complex__t_var = &_func_eval_sh_chebyt[double_complex] +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double _proto_eval_sh_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyt_double__t *_proto_eval_sh_chebyt_double__t_var = &_func_eval_sh_chebyt[double] +from .orthogonal_eval cimport eval_sh_chebyt_l as _func_eval_sh_chebyt_l +ctypedef double _proto_eval_sh_chebyt_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_sh_chebyt_l_t *_proto_eval_sh_chebyt_l_t_var = &_func_eval_sh_chebyt_l +from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double complex _proto_eval_sh_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyu_double_complex__t *_proto_eval_sh_chebyu_double_complex__t_var = &_func_eval_sh_chebyu[double_complex] +from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double _proto_eval_sh_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyu_double__t *_proto_eval_sh_chebyu_double__t_var = &_func_eval_sh_chebyu[double] +from .orthogonal_eval cimport eval_sh_chebyu_l as _func_eval_sh_chebyu_l +ctypedef double _proto_eval_sh_chebyu_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_sh_chebyu_l_t *_proto_eval_sh_chebyu_l_t_var = &_func_eval_sh_chebyu_l +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double complex _proto_eval_sh_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_sh_jacobi_double_complex__t *_proto_eval_sh_jacobi_double_complex__t_var = &_func_eval_sh_jacobi[double_complex] +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double _proto_eval_sh_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_double__t *_proto_eval_sh_jacobi_double__t_var = &_func_eval_sh_jacobi[double] +from .orthogonal_eval cimport eval_sh_jacobi_l as _func_eval_sh_jacobi_l +ctypedef double _proto_eval_sh_jacobi_l_t(Py_ssize_t, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_l_t *_proto_eval_sh_jacobi_l_t_var = &_func_eval_sh_jacobi_l +from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre +ctypedef double complex _proto_eval_sh_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_legendre_double_complex__t *_proto_eval_sh_legendre_double_complex__t_var = &_func_eval_sh_legendre[double_complex] +from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre +ctypedef double _proto_eval_sh_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_legendre_double__t *_proto_eval_sh_legendre_double__t_var = &_func_eval_sh_legendre[double] +from .orthogonal_eval cimport eval_sh_legendre_l as _func_eval_sh_legendre_l +ctypedef double _proto_eval_sh_legendre_l_t(Py_ssize_t, double) noexcept nogil +cdef _proto_eval_sh_legendre_l_t *_proto_eval_sh_legendre_l_t_var = &_func_eval_sh_legendre_l +cdef extern from r"_ufuncs_defs.h": + cdef double 
_func_cephes_exp10 "cephes_exp10"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_exp2 "cephes_exp2"(double) noexcept nogil +from ._cunity cimport cexpm1 as _func_cexpm1 +ctypedef double complex _proto_cexpm1_t(double complex) noexcept nogil +cdef _proto_cexpm1_t *_proto_cexpm1_t_var = &_func_cexpm1 +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_expm1 "cephes_expm1"(double) noexcept nogil +from ._legacy cimport expn_unsafe as _func_expn_unsafe +ctypedef double _proto_expn_unsafe_t(double, double) noexcept nogil +cdef _proto_expn_unsafe_t *_proto_expn_unsafe_t_var = &_func_expn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_expn_wrap "cephes_expn_wrap"(Py_ssize_t, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_fdtr "cephes_fdtr"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_fdtrc "cephes_fdtrc"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_fdtri "cephes_fdtri"(double, double, double) noexcept nogil +from ._cdflib_wrappers cimport fdtridfd as _func_fdtridfd +ctypedef double _proto_fdtridfd_t(double, double, double) noexcept nogil +cdef _proto_fdtridfd_t *_proto_fdtridfd_t_var = &_func_fdtridfd +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cephes_fresnl_wrap "cephes_fresnl_wrap"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cfresnl_wrap "cfresnl_wrap"(double complex, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_igam "cephes_igam"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_igamc "cephes_igamc"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_igamci "cephes_igamci"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_igami "cephes_igami"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_gammasgn "cephes_gammasgn"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_gdtr "cephes_gdtr"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_gdtrc "cephes_gdtrc"(double, double, double) noexcept nogil +from ._cdflib_wrappers cimport gdtria as _func_gdtria +ctypedef double _proto_gdtria_t(double, double, double) noexcept nogil +cdef _proto_gdtria_t *_proto_gdtria_t_var = &_func_gdtria +from ._cdflib_wrappers cimport gdtrib as _func_gdtrib +ctypedef double _proto_gdtrib_t(double, double, double) noexcept nogil +cdef _proto_gdtrib_t *_proto_gdtrib_t_var = &_func_gdtrib +from ._cdflib_wrappers cimport gdtrix as _func_gdtrix +ctypedef double _proto_gdtrix_t(double, double, double) noexcept nogil +cdef _proto_gdtrix_t *_proto_gdtrix_t_var = &_func_gdtrix +from ._convex_analysis cimport huber as _func_huber +ctypedef double _proto_huber_t(double, double) noexcept nogil +cdef _proto_huber_t *_proto_huber_t_var = &_func_huber +from ._hyp0f1 cimport _hyp0f1_cmplx as _func__hyp0f1_cmplx +ctypedef double complex _proto__hyp0f1_cmplx_t(double, double complex) noexcept nogil +cdef _proto__hyp0f1_cmplx_t *_proto__hyp0f1_cmplx_t_var = &_func__hyp0f1_cmplx +from ._hyp0f1 cimport _hyp0f1_real as _func__hyp0f1_real +ctypedef double _proto__hyp0f1_real_t(double, double) noexcept nogil +cdef 
_proto__hyp0f1_real_t *_proto__hyp0f1_real_t_var = &_func__hyp0f1_real +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_chyp1f1_wrap "chyp1f1_wrap"(double, double, double complex) noexcept nogil +from ._hypergeometric cimport hyperu as _func_hyperu +ctypedef double _proto_hyperu_t(double, double, double) noexcept nogil +cdef _proto_hyperu_t *_proto_hyperu_t_var = &_func_hyperu +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_i0 "cephes_i0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_i0e "cephes_i0e"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_i1 "cephes_i1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_i1e "cephes_i1e"(double) noexcept nogil +from ._boxcox cimport inv_boxcox as _func_inv_boxcox +ctypedef double _proto_inv_boxcox_t(double, double) noexcept nogil +cdef _proto_inv_boxcox_t *_proto_inv_boxcox_t_var = &_func_inv_boxcox +from ._boxcox cimport inv_boxcox1p as _func_inv_boxcox1p +ctypedef double _proto_inv_boxcox1p_t(double, double) noexcept nogil +cdef _proto_inv_boxcox1p_t *_proto_inv_boxcox1p_t_var = &_func_inv_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_j0 "cephes_j0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_j1 "cephes_j1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_k0 "cephes_k0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_k0e "cephes_k0e"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_k1 "cephes_k1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_k1e "cephes_k1e"(double) noexcept nogil +from ._convex_analysis cimport kl_div as _func_kl_div +ctypedef double _proto_kl_div_t(double, double) noexcept nogil +cdef _proto_kl_div_t *_proto_kl_div_t_var = &_func_kl_div +from ._legacy cimport kn_unsafe as _func_kn_unsafe +ctypedef double _proto_kn_unsafe_t(double, double) noexcept nogil +cdef _proto_kn_unsafe_t *_proto_kn_unsafe_t_var = &_func_kn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_special_cyl_bessel_k_int "special_cyl_bessel_k_int"(Py_ssize_t, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_kolmogi "cephes_kolmogi"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_kolmogorov "cephes_kolmogorov"(double) noexcept nogil +from ._cunity cimport clog1p as _func_clog1p +ctypedef double complex _proto_clog1p_t(double complex) noexcept nogil +cdef _proto_clog1p_t *_proto_clog1p_t_var = &_func_clog1p +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_log1p "cephes_log1p"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_pmv_wrap "pmv_wrap"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_struve_l "cephes_struve_l"(double, double) noexcept nogil +from ._legacy cimport nbdtr_unsafe as _func_nbdtr_unsafe +ctypedef double _proto_nbdtr_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtr_unsafe_t *_proto_nbdtr_unsafe_t_var = &_func_nbdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_nbdtr_wrap "cephes_nbdtr_wrap"(Py_ssize_t, Py_ssize_t, double) noexcept nogil +from ._legacy cimport nbdtrc_unsafe as _func_nbdtrc_unsafe +ctypedef double _proto_nbdtrc_unsafe_t(double, double, 
double) noexcept nogil +cdef _proto_nbdtrc_unsafe_t *_proto_nbdtrc_unsafe_t_var = &_func_nbdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_nbdtrc_wrap "cephes_nbdtrc_wrap"(Py_ssize_t, Py_ssize_t, double) noexcept nogil +from ._legacy cimport nbdtri_unsafe as _func_nbdtri_unsafe +ctypedef double _proto_nbdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtri_unsafe_t *_proto_nbdtri_unsafe_t_var = &_func_nbdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_nbdtri_wrap "cephes_nbdtri_wrap"(Py_ssize_t, Py_ssize_t, double) noexcept nogil +from ._cdflib_wrappers cimport nbdtrik as _func_nbdtrik +ctypedef double _proto_nbdtrik_t(double, double, double) noexcept nogil +cdef _proto_nbdtrik_t *_proto_nbdtrik_t_var = &_func_nbdtrik +from ._cdflib_wrappers cimport nbdtrin as _func_nbdtrin +ctypedef double _proto_nbdtrin_t(double, double, double) noexcept nogil +cdef _proto_nbdtrin_t *_proto_nbdtrin_t_var = &_func_nbdtrin +from ._cdflib_wrappers cimport ncfdtr as _func_ncfdtr +ctypedef double _proto_ncfdtr_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtr_t *_proto_ncfdtr_t_var = &_func_ncfdtr +from ._cdflib_wrappers cimport ncfdtri as _func_ncfdtri +ctypedef double _proto_ncfdtri_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtri_t *_proto_ncfdtri_t_var = &_func_ncfdtri +from ._cdflib_wrappers cimport ncfdtridfd as _func_ncfdtridfd +ctypedef double _proto_ncfdtridfd_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfd_t *_proto_ncfdtridfd_t_var = &_func_ncfdtridfd +from ._cdflib_wrappers cimport ncfdtridfn as _func_ncfdtridfn +ctypedef double _proto_ncfdtridfn_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfn_t *_proto_ncfdtridfn_t_var = &_func_ncfdtridfn +from ._cdflib_wrappers cimport ncfdtrinc as _func_ncfdtrinc +ctypedef double _proto_ncfdtrinc_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtrinc_t *_proto_ncfdtrinc_t_var = &_func_ncfdtrinc +from ._cdflib_wrappers cimport nctdtr as _func_nctdtr +ctypedef double _proto_nctdtr_t(double, double, double) noexcept nogil +cdef _proto_nctdtr_t *_proto_nctdtr_t_var = &_func_nctdtr +from ._cdflib_wrappers cimport nctdtridf as _func_nctdtridf +ctypedef double _proto_nctdtridf_t(double, double, double) noexcept nogil +cdef _proto_nctdtridf_t *_proto_nctdtridf_t_var = &_func_nctdtridf +from ._cdflib_wrappers cimport nctdtrinc as _func_nctdtrinc +ctypedef double _proto_nctdtrinc_t(double, double, double) noexcept nogil +cdef _proto_nctdtrinc_t *_proto_nctdtrinc_t_var = &_func_nctdtrinc +from ._cdflib_wrappers cimport nctdtrit as _func_nctdtrit +ctypedef double _proto_nctdtrit_t(double, double, double) noexcept nogil +cdef _proto_nctdtrit_t *_proto_nctdtrit_t_var = &_func_nctdtrit +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_ndtr "cephes_ndtr"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_ndtri "cephes_ndtri"(double) noexcept nogil +from ._ndtri_exp cimport ndtri_exp as _func_ndtri_exp +ctypedef double _proto_ndtri_exp_t(double) noexcept nogil +cdef _proto_ndtri_exp_t *_proto_ndtri_exp_t_var = &_func_ndtri_exp +from ._cdflib_wrappers cimport nrdtrimn as _func_nrdtrimn +ctypedef double _proto_nrdtrimn_t(double, double, double) noexcept nogil +cdef _proto_nrdtrimn_t *_proto_nrdtrimn_t_var = &_func_nrdtrimn +from ._cdflib_wrappers cimport nrdtrisd as _func_nrdtrisd +ctypedef double _proto_nrdtrisd_t(double, double, double) noexcept nogil 
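+# A note on the ctypedef/pointer-variable pattern used throughout this block
+# (sketch of the apparent intent, inferred from the code itself): each
+# `_proto_*_t` typedef spells out the exact C signature expected of a wrapped
+# kernel, and assigning the cimported function's address to the typed
+# `_proto_*_t_var` pointer forces Cython to check that signature at compile
+# time, so a mismatch breaks the build instead of miscasting arguments inside
+# the ufunc inner loops at runtime.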
+cdef _proto_nrdtrisd_t *_proto_nrdtrisd_t_var = &_func_nrdtrisd +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_owens_t "cephes_owens_t"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_pdtr "cephes_pdtr"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_pdtrc "cephes_pdtrc"(double, double) noexcept nogil +from ._legacy cimport pdtri_unsafe as _func_pdtri_unsafe +ctypedef double _proto_pdtri_unsafe_t(double, double) noexcept nogil +cdef _proto_pdtri_unsafe_t *_proto_pdtri_unsafe_t_var = &_func_pdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_pdtri_wrap "cephes_pdtri_wrap"(Py_ssize_t, double) noexcept nogil +from ._cdflib_wrappers cimport pdtrik as _func_pdtrik +ctypedef double _proto_pdtrik_t(double, double) noexcept nogil +cdef _proto_pdtrik_t *_proto_pdtrik_t_var = &_func_pdtrik +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_poch "cephes_poch"(double, double) noexcept nogil +from ._convex_analysis cimport pseudo_huber as _func_pseudo_huber +ctypedef double _proto_pseudo_huber_t(double, double) noexcept nogil +cdef _proto_pseudo_huber_t *_proto_pseudo_huber_t_var = &_func_pseudo_huber +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_radian "cephes_radian"(double, double, double) noexcept nogil +from ._convex_analysis cimport rel_entr as _func_rel_entr +ctypedef double _proto_rel_entr_t(double, double) noexcept nogil +cdef _proto_rel_entr_t *_proto_rel_entr_t_var = &_func_rel_entr +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_round "cephes_round"(double) noexcept nogil +from ._sici cimport cshichi as _func_cshichi +ctypedef int _proto_cshichi_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_cshichi_t *_proto_cshichi_t_var = &_func_cshichi +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cephes_shichi_wrap "cephes_shichi_wrap"(double, double *, double *) noexcept nogil +from ._sici cimport csici as _func_csici +ctypedef int _proto_csici_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_csici_t *_proto_csici_t_var = &_func_csici +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cephes_sici_wrap "cephes_sici_wrap"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_sindg "cephes_sindg"(double) noexcept nogil +from ._legacy cimport smirnov_unsafe as _func_smirnov_unsafe +ctypedef double _proto_smirnov_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnov_unsafe_t *_proto_smirnov_unsafe_t_var = &_func_smirnov_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_smirnov_wrap "cephes_smirnov_wrap"(Py_ssize_t, double) noexcept nogil +from ._legacy cimport smirnovi_unsafe as _func_smirnovi_unsafe +ctypedef double _proto_smirnovi_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovi_unsafe_t *_proto_smirnovi_unsafe_t_var = &_func_smirnovi_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_smirnovi_wrap "cephes_smirnovi_wrap"(Py_ssize_t, double) noexcept nogil +from ._spence cimport cspence as _func_cspence +ctypedef double complex _proto_cspence_t(double complex) noexcept nogil +cdef _proto_cspence_t *_proto_cspence_t_var = &_func_cspence +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_spence "cephes_spence"(double) noexcept nogil +from ._cdflib_wrappers cimport stdtr as _func_stdtr +ctypedef double _proto_stdtr_t(double, 
double) noexcept nogil +cdef _proto_stdtr_t *_proto_stdtr_t_var = &_func_stdtr +from ._cdflib_wrappers cimport stdtridf as _func_stdtridf +ctypedef double _proto_stdtridf_t(double, double) noexcept nogil +cdef _proto_stdtridf_t *_proto_stdtridf_t_var = &_func_stdtridf +from ._cdflib_wrappers cimport stdtrit as _func_stdtrit +ctypedef double _proto_stdtrit_t(double, double) noexcept nogil +cdef _proto_stdtrit_t *_proto_stdtrit_t_var = &_func_stdtrit +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_struve_h "cephes_struve_h"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_tandg "cephes_tandg"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_tukeylambdacdf "cephes_tukeylambdacdf"(double, double) noexcept nogil +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double _proto_xlog1py_double__t(double, double) noexcept nogil +cdef _proto_xlog1py_double__t *_proto_xlog1py_double__t_var = &_func_xlog1py[double] +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double complex _proto_xlog1py_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlog1py_double_complex__t *_proto_xlog1py_double_complex__t_var = &_func_xlog1py[double_complex] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double _proto_xlogy_double__t(double, double) noexcept nogil +cdef _proto_xlogy_double__t *_proto_xlogy_double__t_var = &_func_xlogy[double] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double complex _proto_xlogy_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlogy_double_complex__t *_proto_xlogy_double_complex__t_var = &_func_xlogy[double_complex] +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_y0 "cephes_y0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_y1 "cephes_y1"(double) noexcept nogil +from ._legacy cimport yn_unsafe as _func_yn_unsafe +ctypedef double _proto_yn_unsafe_t(double, double) noexcept nogil +cdef _proto_yn_unsafe_t *_proto_yn_unsafe_t_var = &_func_yn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_yn_wrap "cephes_yn_wrap"(Py_ssize_t, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cephes_zetac "cephes_zetac"(double) noexcept nogil +cdef np.PyUFuncGenericFunction ufunc__beta_pdf_loops[2] +cdef void *ufunc__beta_pdf_ptr[4] +cdef void *ufunc__beta_pdf_data[2] +cdef char ufunc__beta_pdf_types[8] +cdef char *ufunc__beta_pdf_doc = ( + "_beta_pdf(x, a, b)\n" + "\n" + "Probability density function of beta distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued such that :math:`0 \\leq x \\leq 1`,\n" + " the upper limit of integration\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__beta_pdf_loops[0] = loop_f_fff__As_fff_f +ufunc__beta_pdf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__beta_pdf_types[0] = NPY_FLOAT +ufunc__beta_pdf_types[1] = NPY_FLOAT +ufunc__beta_pdf_types[2] = NPY_FLOAT +ufunc__beta_pdf_types[3] = NPY_FLOAT +ufunc__beta_pdf_types[4] = NPY_DOUBLE +ufunc__beta_pdf_types[5] = NPY_DOUBLE +ufunc__beta_pdf_types[6] = NPY_DOUBLE +ufunc__beta_pdf_types[7] = NPY_DOUBLE +ufunc__beta_pdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_beta_pdf_float +ufunc__beta_pdf_ptr[2*0+1] = ("_beta_pdf") +ufunc__beta_pdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_beta_pdf_double +ufunc__beta_pdf_ptr[2*1+1] = 
("_beta_pdf") +ufunc__beta_pdf_data[0] = &ufunc__beta_pdf_ptr[2*0] +ufunc__beta_pdf_data[1] = &ufunc__beta_pdf_ptr[2*1] +_beta_pdf = np.PyUFunc_FromFuncAndData(ufunc__beta_pdf_loops, ufunc__beta_pdf_data, ufunc__beta_pdf_types, 2, 3, 1, 0, "_beta_pdf", ufunc__beta_pdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__beta_ppf_loops[2] +cdef void *ufunc__beta_ppf_ptr[4] +cdef void *ufunc__beta_ppf_data[2] +cdef char ufunc__beta_ppf_types[8] +cdef char *ufunc__beta_ppf_doc = ( + "_beta_ppf(x, a, b)\n" + "\n" + "Percent point function of beta distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued such that :math:`0 \\leq x \\leq 1`,\n" + " the upper limit of integration\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__beta_ppf_loops[0] = loop_f_fff__As_fff_f +ufunc__beta_ppf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__beta_ppf_types[0] = NPY_FLOAT +ufunc__beta_ppf_types[1] = NPY_FLOAT +ufunc__beta_ppf_types[2] = NPY_FLOAT +ufunc__beta_ppf_types[3] = NPY_FLOAT +ufunc__beta_ppf_types[4] = NPY_DOUBLE +ufunc__beta_ppf_types[5] = NPY_DOUBLE +ufunc__beta_ppf_types[6] = NPY_DOUBLE +ufunc__beta_ppf_types[7] = NPY_DOUBLE +ufunc__beta_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_beta_ppf_float +ufunc__beta_ppf_ptr[2*0+1] = ("_beta_ppf") +ufunc__beta_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_beta_ppf_double +ufunc__beta_ppf_ptr[2*1+1] = ("_beta_ppf") +ufunc__beta_ppf_data[0] = &ufunc__beta_ppf_ptr[2*0] +ufunc__beta_ppf_data[1] = &ufunc__beta_ppf_ptr[2*1] +_beta_ppf = np.PyUFunc_FromFuncAndData(ufunc__beta_ppf_loops, ufunc__beta_ppf_data, ufunc__beta_ppf_types, 2, 3, 1, 0, "_beta_ppf", ufunc__beta_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__binom_cdf_loops[2] +cdef void *ufunc__binom_cdf_ptr[4] +cdef void *ufunc__binom_cdf_data[2] +cdef char ufunc__binom_cdf_types[8] +cdef char *ufunc__binom_cdf_doc = ( + "_binom_cdf(x, n, p)\n" + "\n" + "Cumulative density function of binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "n : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__binom_cdf_loops[0] = loop_f_fff__As_fff_f +ufunc__binom_cdf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__binom_cdf_types[0] = NPY_FLOAT +ufunc__binom_cdf_types[1] = NPY_FLOAT +ufunc__binom_cdf_types[2] = NPY_FLOAT +ufunc__binom_cdf_types[3] = NPY_FLOAT +ufunc__binom_cdf_types[4] = NPY_DOUBLE +ufunc__binom_cdf_types[5] = NPY_DOUBLE +ufunc__binom_cdf_types[6] = NPY_DOUBLE +ufunc__binom_cdf_types[7] = NPY_DOUBLE +ufunc__binom_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_binom_cdf_float +ufunc__binom_cdf_ptr[2*0+1] = ("_binom_cdf") +ufunc__binom_cdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_binom_cdf_double +ufunc__binom_cdf_ptr[2*1+1] = ("_binom_cdf") +ufunc__binom_cdf_data[0] = &ufunc__binom_cdf_ptr[2*0] +ufunc__binom_cdf_data[1] = &ufunc__binom_cdf_ptr[2*1] +_binom_cdf = np.PyUFunc_FromFuncAndData(ufunc__binom_cdf_loops, ufunc__binom_cdf_data, ufunc__binom_cdf_types, 2, 3, 1, 0, "_binom_cdf", ufunc__binom_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__binom_isf_loops[2] +cdef void *ufunc__binom_isf_ptr[4] +cdef void *ufunc__binom_isf_data[2] +cdef char ufunc__binom_isf_types[8] +cdef char *ufunc__binom_isf_doc = ( + "_binom_isf(x, n, p)\n" + "\n" + "Inverse survival function of binomial distribution.\n" + 
"\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "n : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__binom_isf_loops[0] = loop_f_fff__As_fff_f +ufunc__binom_isf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__binom_isf_types[0] = NPY_FLOAT +ufunc__binom_isf_types[1] = NPY_FLOAT +ufunc__binom_isf_types[2] = NPY_FLOAT +ufunc__binom_isf_types[3] = NPY_FLOAT +ufunc__binom_isf_types[4] = NPY_DOUBLE +ufunc__binom_isf_types[5] = NPY_DOUBLE +ufunc__binom_isf_types[6] = NPY_DOUBLE +ufunc__binom_isf_types[7] = NPY_DOUBLE +ufunc__binom_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_binom_isf_float +ufunc__binom_isf_ptr[2*0+1] = ("_binom_isf") +ufunc__binom_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_binom_isf_double +ufunc__binom_isf_ptr[2*1+1] = ("_binom_isf") +ufunc__binom_isf_data[0] = &ufunc__binom_isf_ptr[2*0] +ufunc__binom_isf_data[1] = &ufunc__binom_isf_ptr[2*1] +_binom_isf = np.PyUFunc_FromFuncAndData(ufunc__binom_isf_loops, ufunc__binom_isf_data, ufunc__binom_isf_types, 2, 3, 1, 0, "_binom_isf", ufunc__binom_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__binom_pmf_loops[2] +cdef void *ufunc__binom_pmf_ptr[4] +cdef void *ufunc__binom_pmf_data[2] +cdef char ufunc__binom_pmf_types[8] +cdef char *ufunc__binom_pmf_doc = ( + "_binom_pmf(x, n, p)\n" + "\n" + "Probability mass function of binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "n : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__binom_pmf_loops[0] = loop_f_fff__As_fff_f +ufunc__binom_pmf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__binom_pmf_types[0] = NPY_FLOAT +ufunc__binom_pmf_types[1] = NPY_FLOAT +ufunc__binom_pmf_types[2] = NPY_FLOAT +ufunc__binom_pmf_types[3] = NPY_FLOAT +ufunc__binom_pmf_types[4] = NPY_DOUBLE +ufunc__binom_pmf_types[5] = NPY_DOUBLE +ufunc__binom_pmf_types[6] = NPY_DOUBLE +ufunc__binom_pmf_types[7] = NPY_DOUBLE +ufunc__binom_pmf_ptr[2*0] = scipy.special._ufuncs_cxx._export_binom_pmf_float +ufunc__binom_pmf_ptr[2*0+1] = ("_binom_pmf") +ufunc__binom_pmf_ptr[2*1] = scipy.special._ufuncs_cxx._export_binom_pmf_double +ufunc__binom_pmf_ptr[2*1+1] = ("_binom_pmf") +ufunc__binom_pmf_data[0] = &ufunc__binom_pmf_ptr[2*0] +ufunc__binom_pmf_data[1] = &ufunc__binom_pmf_ptr[2*1] +_binom_pmf = np.PyUFunc_FromFuncAndData(ufunc__binom_pmf_loops, ufunc__binom_pmf_data, ufunc__binom_pmf_types, 2, 3, 1, 0, "_binom_pmf", ufunc__binom_pmf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__binom_ppf_loops[2] +cdef void *ufunc__binom_ppf_ptr[4] +cdef void *ufunc__binom_ppf_data[2] +cdef char ufunc__binom_ppf_types[8] +cdef char *ufunc__binom_ppf_doc = ( + "_binom_ppf(x, n, p)\n" + "\n" + "Percent point function of binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "n : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__binom_ppf_loops[0] = loop_f_fff__As_fff_f +ufunc__binom_ppf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__binom_ppf_types[0] = NPY_FLOAT +ufunc__binom_ppf_types[1] = NPY_FLOAT +ufunc__binom_ppf_types[2] = NPY_FLOAT +ufunc__binom_ppf_types[3] = NPY_FLOAT +ufunc__binom_ppf_types[4] = NPY_DOUBLE 
+ufunc__binom_ppf_types[5] = NPY_DOUBLE +ufunc__binom_ppf_types[6] = NPY_DOUBLE +ufunc__binom_ppf_types[7] = NPY_DOUBLE +ufunc__binom_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_binom_ppf_float +ufunc__binom_ppf_ptr[2*0+1] = ("_binom_ppf") +ufunc__binom_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_binom_ppf_double +ufunc__binom_ppf_ptr[2*1+1] = ("_binom_ppf") +ufunc__binom_ppf_data[0] = &ufunc__binom_ppf_ptr[2*0] +ufunc__binom_ppf_data[1] = &ufunc__binom_ppf_ptr[2*1] +_binom_ppf = np.PyUFunc_FromFuncAndData(ufunc__binom_ppf_loops, ufunc__binom_ppf_data, ufunc__binom_ppf_types, 2, 3, 1, 0, "_binom_ppf", ufunc__binom_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__binom_sf_loops[2] +cdef void *ufunc__binom_sf_ptr[4] +cdef void *ufunc__binom_sf_data[2] +cdef char ufunc__binom_sf_types[8] +cdef char *ufunc__binom_sf_doc = ( + "_binom_sf(x, n, p)\n" + "\n" + "Survival function of binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "n : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__binom_sf_loops[0] = loop_f_fff__As_fff_f +ufunc__binom_sf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__binom_sf_types[0] = NPY_FLOAT +ufunc__binom_sf_types[1] = NPY_FLOAT +ufunc__binom_sf_types[2] = NPY_FLOAT +ufunc__binom_sf_types[3] = NPY_FLOAT +ufunc__binom_sf_types[4] = NPY_DOUBLE +ufunc__binom_sf_types[5] = NPY_DOUBLE +ufunc__binom_sf_types[6] = NPY_DOUBLE +ufunc__binom_sf_types[7] = NPY_DOUBLE +ufunc__binom_sf_ptr[2*0] = scipy.special._ufuncs_cxx._export_binom_sf_float +ufunc__binom_sf_ptr[2*0+1] = ("_binom_sf") +ufunc__binom_sf_ptr[2*1] = scipy.special._ufuncs_cxx._export_binom_sf_double +ufunc__binom_sf_ptr[2*1+1] = ("_binom_sf") +ufunc__binom_sf_data[0] = &ufunc__binom_sf_ptr[2*0] +ufunc__binom_sf_data[1] = &ufunc__binom_sf_ptr[2*1] +_binom_sf = np.PyUFunc_FromFuncAndData(ufunc__binom_sf_loops, ufunc__binom_sf_data, ufunc__binom_sf_types, 2, 3, 1, 0, "_binom_sf", ufunc__binom_sf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__cosine_cdf_loops[2] +cdef void *ufunc__cosine_cdf_ptr[4] +cdef void *ufunc__cosine_cdf_data[2] +cdef char ufunc__cosine_cdf_types[4] +cdef char *ufunc__cosine_cdf_doc = ( + "_cosine_cdf(x)\n" + "\n" + "Cumulative distribution function (CDF) of the cosine distribution::\n" + "\n" + " { 0, x < -pi\n" + " cdf(x) = { (pi + x + sin(x))/(2*pi), -pi <= x <= pi\n" + " { 1, x > pi\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The cosine distribution CDF evaluated at `x`.") +ufunc__cosine_cdf_loops[0] = loop_d_d__As_f_f +ufunc__cosine_cdf_loops[1] = loop_d_d__As_d_d +ufunc__cosine_cdf_types[0] = NPY_FLOAT +ufunc__cosine_cdf_types[1] = NPY_FLOAT +ufunc__cosine_cdf_types[2] = NPY_DOUBLE +ufunc__cosine_cdf_types[3] = NPY_DOUBLE +ufunc__cosine_cdf_ptr[2*0] = _func_cosine_cdf +ufunc__cosine_cdf_ptr[2*0+1] = ("_cosine_cdf") +ufunc__cosine_cdf_ptr[2*1] = _func_cosine_cdf +ufunc__cosine_cdf_ptr[2*1+1] = ("_cosine_cdf") +ufunc__cosine_cdf_data[0] = &ufunc__cosine_cdf_ptr[2*0] +ufunc__cosine_cdf_data[1] = &ufunc__cosine_cdf_ptr[2*1] +_cosine_cdf = np.PyUFunc_FromFuncAndData(ufunc__cosine_cdf_loops, ufunc__cosine_cdf_data, ufunc__cosine_cdf_types, 2, 1, 1, 0, "_cosine_cdf", ufunc__cosine_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__cosine_invcdf_loops[2] +cdef void 
*ufunc__cosine_invcdf_ptr[4] +cdef void *ufunc__cosine_invcdf_data[2] +cdef char ufunc__cosine_invcdf_types[4] +cdef char *ufunc__cosine_invcdf_doc = ( + "_cosine_invcdf(p)\n" + "\n" + "Inverse of the cumulative distribution function (CDF) of the cosine\n" + "distribution.\n" + "\n" + "The CDF of the cosine distribution is::\n" + "\n" + " cdf(x) = (pi + x + sin(x))/(2*pi)\n" + "\n" + "This function computes the inverse of cdf(x).\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " `p` must contain real numbers in the interval ``0 <= p <= 1``.\n" + " `nan` is returned for values of `p` outside the interval [0, 1].\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The inverse of the cosine distribution CDF evaluated at `p`.") +ufunc__cosine_invcdf_loops[0] = loop_d_d__As_f_f +ufunc__cosine_invcdf_loops[1] = loop_d_d__As_d_d +ufunc__cosine_invcdf_types[0] = NPY_FLOAT +ufunc__cosine_invcdf_types[1] = NPY_FLOAT +ufunc__cosine_invcdf_types[2] = NPY_DOUBLE +ufunc__cosine_invcdf_types[3] = NPY_DOUBLE +ufunc__cosine_invcdf_ptr[2*0] = _func_cosine_invcdf +ufunc__cosine_invcdf_ptr[2*0+1] = ("_cosine_invcdf") +ufunc__cosine_invcdf_ptr[2*1] = _func_cosine_invcdf +ufunc__cosine_invcdf_ptr[2*1+1] = ("_cosine_invcdf") +ufunc__cosine_invcdf_data[0] = &ufunc__cosine_invcdf_ptr[2*0] +ufunc__cosine_invcdf_data[1] = &ufunc__cosine_invcdf_ptr[2*1] +_cosine_invcdf = np.PyUFunc_FromFuncAndData(ufunc__cosine_invcdf_loops, ufunc__cosine_invcdf_data, ufunc__cosine_invcdf_types, 2, 1, 1, 0, "_cosine_invcdf", ufunc__cosine_invcdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ellip_harm_loops[3] +cdef void *ufunc__ellip_harm_ptr[6] +cdef void *ufunc__ellip_harm_data[3] +cdef char ufunc__ellip_harm_types[24] +cdef char *ufunc__ellip_harm_doc = ( + "Internal function, use `ellip_harm` instead.") +ufunc__ellip_harm_loops[0] = loop_d_ddddddd__As_fffffff_f +ufunc__ellip_harm_loops[1] = loop_d_ddiiddd__As_ddllddd_d +ufunc__ellip_harm_loops[2] = loop_d_ddddddd__As_ddddddd_d +ufunc__ellip_harm_types[0] = NPY_FLOAT +ufunc__ellip_harm_types[1] = NPY_FLOAT +ufunc__ellip_harm_types[2] = NPY_FLOAT +ufunc__ellip_harm_types[3] = NPY_FLOAT +ufunc__ellip_harm_types[4] = NPY_FLOAT +ufunc__ellip_harm_types[5] = NPY_FLOAT +ufunc__ellip_harm_types[6] = NPY_FLOAT +ufunc__ellip_harm_types[7] = NPY_FLOAT +ufunc__ellip_harm_types[8] = NPY_DOUBLE +ufunc__ellip_harm_types[9] = NPY_DOUBLE +ufunc__ellip_harm_types[10] = NPY_LONG +ufunc__ellip_harm_types[11] = NPY_LONG +ufunc__ellip_harm_types[12] = NPY_DOUBLE +ufunc__ellip_harm_types[13] = NPY_DOUBLE +ufunc__ellip_harm_types[14] = NPY_DOUBLE +ufunc__ellip_harm_types[15] = NPY_DOUBLE +ufunc__ellip_harm_types[16] = NPY_DOUBLE +ufunc__ellip_harm_types[17] = NPY_DOUBLE +ufunc__ellip_harm_types[18] = NPY_DOUBLE +ufunc__ellip_harm_types[19] = NPY_DOUBLE +ufunc__ellip_harm_types[20] = NPY_DOUBLE +ufunc__ellip_harm_types[21] = NPY_DOUBLE +ufunc__ellip_harm_types[22] = NPY_DOUBLE +ufunc__ellip_harm_types[23] = NPY_DOUBLE +ufunc__ellip_harm_ptr[2*0] = _func_ellip_harmonic_unsafe +ufunc__ellip_harm_ptr[2*0+1] = ("_ellip_harm") +ufunc__ellip_harm_ptr[2*1] = _func_ellip_harmonic +ufunc__ellip_harm_ptr[2*1+1] = ("_ellip_harm") +ufunc__ellip_harm_ptr[2*2] = _func_ellip_harmonic_unsafe +ufunc__ellip_harm_ptr[2*2+1] = ("_ellip_harm") +ufunc__ellip_harm_data[0] = &ufunc__ellip_harm_ptr[2*0] +ufunc__ellip_harm_data[1] = &ufunc__ellip_harm_ptr[2*1] +ufunc__ellip_harm_data[2] = &ufunc__ellip_harm_ptr[2*2] +_ellip_harm = np.PyUFunc_FromFuncAndData(ufunc__ellip_harm_loops, 
ufunc__ellip_harm_data, ufunc__ellip_harm_types, 3, 7, 1, 0, "_ellip_harm", ufunc__ellip_harm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__factorial_loops[2] +cdef void *ufunc__factorial_ptr[4] +cdef void *ufunc__factorial_data[2] +cdef char ufunc__factorial_types[4] +cdef char *ufunc__factorial_doc = ( + "Internal function, do not use.") +ufunc__factorial_loops[0] = loop_d_d__As_f_f +ufunc__factorial_loops[1] = loop_d_d__As_d_d +ufunc__factorial_types[0] = NPY_FLOAT +ufunc__factorial_types[1] = NPY_FLOAT +ufunc__factorial_types[2] = NPY_DOUBLE +ufunc__factorial_types[3] = NPY_DOUBLE +ufunc__factorial_ptr[2*0] = _func__factorial +ufunc__factorial_ptr[2*0+1] = ("_factorial") +ufunc__factorial_ptr[2*1] = _func__factorial +ufunc__factorial_ptr[2*1+1] = ("_factorial") +ufunc__factorial_data[0] = &ufunc__factorial_ptr[2*0] +ufunc__factorial_data[1] = &ufunc__factorial_ptr[2*1] +_factorial = np.PyUFunc_FromFuncAndData(ufunc__factorial_loops, ufunc__factorial_data, ufunc__factorial_types, 2, 1, 1, 0, "_factorial", ufunc__factorial_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__hypergeom_cdf_loops[2] +cdef void *ufunc__hypergeom_cdf_ptr[4] +cdef void *ufunc__hypergeom_cdf_data[2] +cdef char ufunc__hypergeom_cdf_types[10] +cdef char *ufunc__hypergeom_cdf_doc = ( + "_hypergeom_cdf(x, r, N, M)\n" + "\n" + "Cumulative distribution function of hypergeometric distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r, N, M : array_like\n" + " Positive, integer-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__hypergeom_cdf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__hypergeom_cdf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__hypergeom_cdf_types[0] = NPY_FLOAT +ufunc__hypergeom_cdf_types[1] = NPY_FLOAT +ufunc__hypergeom_cdf_types[2] = NPY_FLOAT +ufunc__hypergeom_cdf_types[3] = NPY_FLOAT +ufunc__hypergeom_cdf_types[4] = NPY_FLOAT +ufunc__hypergeom_cdf_types[5] = NPY_DOUBLE +ufunc__hypergeom_cdf_types[6] = NPY_DOUBLE +ufunc__hypergeom_cdf_types[7] = NPY_DOUBLE +ufunc__hypergeom_cdf_types[8] = NPY_DOUBLE +ufunc__hypergeom_cdf_types[9] = NPY_DOUBLE +ufunc__hypergeom_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_hypergeom_cdf_float +ufunc__hypergeom_cdf_ptr[2*0+1] = ("_hypergeom_cdf") +ufunc__hypergeom_cdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_hypergeom_cdf_double +ufunc__hypergeom_cdf_ptr[2*1+1] = ("_hypergeom_cdf") +ufunc__hypergeom_cdf_data[0] = &ufunc__hypergeom_cdf_ptr[2*0] +ufunc__hypergeom_cdf_data[1] = &ufunc__hypergeom_cdf_ptr[2*1] +_hypergeom_cdf = np.PyUFunc_FromFuncAndData(ufunc__hypergeom_cdf_loops, ufunc__hypergeom_cdf_data, ufunc__hypergeom_cdf_types, 2, 4, 1, 0, "_hypergeom_cdf", ufunc__hypergeom_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__hypergeom_mean_loops[2] +cdef void *ufunc__hypergeom_mean_ptr[4] +cdef void *ufunc__hypergeom_mean_data[2] +cdef char ufunc__hypergeom_mean_types[8] +cdef char *ufunc__hypergeom_mean_doc = ( + "_hypergeom_mean(r, N, M)\n" + "\n" + "Mean of hypergeometric distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r, N, M : array_like\n" + " Positive, integer-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__hypergeom_mean_loops[0] = loop_f_fff__As_fff_f +ufunc__hypergeom_mean_loops[1] = loop_d_ddd__As_ddd_d +ufunc__hypergeom_mean_types[0] = NPY_FLOAT +ufunc__hypergeom_mean_types[1] = NPY_FLOAT +ufunc__hypergeom_mean_types[2] = NPY_FLOAT +ufunc__hypergeom_mean_types[3] = NPY_FLOAT +ufunc__hypergeom_mean_types[4] = NPY_DOUBLE
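# Illustrative sketch (assumes the private scipy.special._ufuncs import path):
# _hypergeom_cdf above is registered with nin=4/nout=1, so all four arguments
# broadcast together like any NumPy ufunc; the order (x, r, N, M) follows the
# docstring above.
import numpy as np
from scipy.special import _ufuncs

k = np.arange(3)                                   # broadcasts against three scalars
print(_ufuncs._hypergeom_cdf(k, 5, 10, 20).shape)  # -> (3,)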
+ufunc__hypergeom_mean_types[5] = NPY_DOUBLE +ufunc__hypergeom_mean_types[6] = NPY_DOUBLE +ufunc__hypergeom_mean_types[7] = NPY_DOUBLE +ufunc__hypergeom_mean_ptr[2*0] = scipy.special._ufuncs_cxx._export_hypergeom_mean_float +ufunc__hypergeom_mean_ptr[2*0+1] = ("_hypergeom_mean") +ufunc__hypergeom_mean_ptr[2*1] = scipy.special._ufuncs_cxx._export_hypergeom_mean_double +ufunc__hypergeom_mean_ptr[2*1+1] = ("_hypergeom_mean") +ufunc__hypergeom_mean_data[0] = &ufunc__hypergeom_mean_ptr[2*0] +ufunc__hypergeom_mean_data[1] = &ufunc__hypergeom_mean_ptr[2*1] +_hypergeom_mean = np.PyUFunc_FromFuncAndData(ufunc__hypergeom_mean_loops, ufunc__hypergeom_mean_data, ufunc__hypergeom_mean_types, 2, 3, 1, 0, "_hypergeom_mean", ufunc__hypergeom_mean_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__hypergeom_pmf_loops[2] +cdef void *ufunc__hypergeom_pmf_ptr[4] +cdef void *ufunc__hypergeom_pmf_data[2] +cdef char ufunc__hypergeom_pmf_types[10] +cdef char *ufunc__hypergeom_pmf_doc = ( + "_hypergeom_pmf(x, r, N, M)\n" + "\n" + "Probability mass function of hypergeometric distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r, N, M : array_like\n" + " Positive, integer-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__hypergeom_pmf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__hypergeom_pmf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__hypergeom_pmf_types[0] = NPY_FLOAT +ufunc__hypergeom_pmf_types[1] = NPY_FLOAT +ufunc__hypergeom_pmf_types[2] = NPY_FLOAT +ufunc__hypergeom_pmf_types[3] = NPY_FLOAT +ufunc__hypergeom_pmf_types[4] = NPY_FLOAT +ufunc__hypergeom_pmf_types[5] = NPY_DOUBLE +ufunc__hypergeom_pmf_types[6] = NPY_DOUBLE +ufunc__hypergeom_pmf_types[7] = NPY_DOUBLE +ufunc__hypergeom_pmf_types[8] = NPY_DOUBLE +ufunc__hypergeom_pmf_types[9] = NPY_DOUBLE +ufunc__hypergeom_pmf_ptr[2*0] = scipy.special._ufuncs_cxx._export_hypergeom_pmf_float +ufunc__hypergeom_pmf_ptr[2*0+1] = ("_hypergeom_pmf") +ufunc__hypergeom_pmf_ptr[2*1] = scipy.special._ufuncs_cxx._export_hypergeom_pmf_double +ufunc__hypergeom_pmf_ptr[2*1+1] = ("_hypergeom_pmf") +ufunc__hypergeom_pmf_data[0] = &ufunc__hypergeom_pmf_ptr[2*0] +ufunc__hypergeom_pmf_data[1] = &ufunc__hypergeom_pmf_ptr[2*1] +_hypergeom_pmf = np.PyUFunc_FromFuncAndData(ufunc__hypergeom_pmf_loops, ufunc__hypergeom_pmf_data, ufunc__hypergeom_pmf_types, 2, 4, 1, 0, "_hypergeom_pmf", ufunc__hypergeom_pmf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__hypergeom_sf_loops[2] +cdef void *ufunc__hypergeom_sf_ptr[4] +cdef void *ufunc__hypergeom_sf_data[2] +cdef char ufunc__hypergeom_sf_types[10] +cdef char *ufunc__hypergeom_sf_doc = ( + "_hypergeom_sf(x, r, N, M)\n" + "\n" + "Survival function of hypergeometric distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r, N, M : array_like\n" + " Positive, integer-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__hypergeom_sf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__hypergeom_sf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__hypergeom_sf_types[0] = NPY_FLOAT +ufunc__hypergeom_sf_types[1] = NPY_FLOAT +ufunc__hypergeom_sf_types[2] = NPY_FLOAT +ufunc__hypergeom_sf_types[3] = NPY_FLOAT +ufunc__hypergeom_sf_types[4] = NPY_FLOAT +ufunc__hypergeom_sf_types[5] = NPY_DOUBLE +ufunc__hypergeom_sf_types[6] = NPY_DOUBLE +ufunc__hypergeom_sf_types[7] = NPY_DOUBLE +ufunc__hypergeom_sf_types[8] = NPY_DOUBLE +ufunc__hypergeom_sf_types[9] = NPY_DOUBLE +ufunc__hypergeom_sf_ptr[2*0] = 
scipy.special._ufuncs_cxx._export_hypergeom_sf_float +ufunc__hypergeom_sf_ptr[2*0+1] = ("_hypergeom_sf") +ufunc__hypergeom_sf_ptr[2*1] = scipy.special._ufuncs_cxx._export_hypergeom_sf_double +ufunc__hypergeom_sf_ptr[2*1+1] = ("_hypergeom_sf") +ufunc__hypergeom_sf_data[0] = &ufunc__hypergeom_sf_ptr[2*0] +ufunc__hypergeom_sf_data[1] = &ufunc__hypergeom_sf_ptr[2*1] +_hypergeom_sf = np.PyUFunc_FromFuncAndData(ufunc__hypergeom_sf_loops, ufunc__hypergeom_sf_data, ufunc__hypergeom_sf_types, 2, 4, 1, 0, "_hypergeom_sf", ufunc__hypergeom_sf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__hypergeom_skewness_loops[2] +cdef void *ufunc__hypergeom_skewness_ptr[4] +cdef void *ufunc__hypergeom_skewness_data[2] +cdef char ufunc__hypergeom_skewness_types[8] +cdef char *ufunc__hypergeom_skewness_doc = ( + "_hypergeom_skewness(r, N, M)\n" + "\n" + "Skewness of hypergeometric distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r, N, M : array_like\n" + " Positive, integer-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__hypergeom_skewness_loops[0] = loop_f_fff__As_fff_f +ufunc__hypergeom_skewness_loops[1] = loop_d_ddd__As_ddd_d +ufunc__hypergeom_skewness_types[0] = NPY_FLOAT +ufunc__hypergeom_skewness_types[1] = NPY_FLOAT +ufunc__hypergeom_skewness_types[2] = NPY_FLOAT +ufunc__hypergeom_skewness_types[3] = NPY_FLOAT +ufunc__hypergeom_skewness_types[4] = NPY_DOUBLE +ufunc__hypergeom_skewness_types[5] = NPY_DOUBLE +ufunc__hypergeom_skewness_types[6] = NPY_DOUBLE +ufunc__hypergeom_skewness_types[7] = NPY_DOUBLE +ufunc__hypergeom_skewness_ptr[2*0] = scipy.special._ufuncs_cxx._export_hypergeom_skewness_float +ufunc__hypergeom_skewness_ptr[2*0+1] = ("_hypergeom_skewness") +ufunc__hypergeom_skewness_ptr[2*1] = scipy.special._ufuncs_cxx._export_hypergeom_skewness_double +ufunc__hypergeom_skewness_ptr[2*1+1] = ("_hypergeom_skewness") +ufunc__hypergeom_skewness_data[0] = &ufunc__hypergeom_skewness_ptr[2*0] +ufunc__hypergeom_skewness_data[1] = &ufunc__hypergeom_skewness_ptr[2*1] +_hypergeom_skewness = np.PyUFunc_FromFuncAndData(ufunc__hypergeom_skewness_loops, ufunc__hypergeom_skewness_data, ufunc__hypergeom_skewness_types, 2, 3, 1, 0, "_hypergeom_skewness", ufunc__hypergeom_skewness_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__hypergeom_variance_loops[2] +cdef void *ufunc__hypergeom_variance_ptr[4] +cdef void *ufunc__hypergeom_variance_data[2] +cdef char ufunc__hypergeom_variance_types[8] +cdef char *ufunc__hypergeom_variance_doc = ( + "_hypergeom_variance(r, N, M)\n" + "\n" + "Variance of hypergeometric distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r, N, M : array_like\n" + " Positive, integer-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__hypergeom_variance_loops[0] = loop_f_fff__As_fff_f +ufunc__hypergeom_variance_loops[1] = loop_d_ddd__As_ddd_d +ufunc__hypergeom_variance_types[0] = NPY_FLOAT +ufunc__hypergeom_variance_types[1] = NPY_FLOAT +ufunc__hypergeom_variance_types[2] = NPY_FLOAT +ufunc__hypergeom_variance_types[3] = NPY_FLOAT +ufunc__hypergeom_variance_types[4] = NPY_DOUBLE +ufunc__hypergeom_variance_types[5] = NPY_DOUBLE +ufunc__hypergeom_variance_types[6] = NPY_DOUBLE +ufunc__hypergeom_variance_types[7] = NPY_DOUBLE +ufunc__hypergeom_variance_ptr[2*0] = scipy.special._ufuncs_cxx._export_hypergeom_variance_float +ufunc__hypergeom_variance_ptr[2*0+1] = ("_hypergeom_variance") +ufunc__hypergeom_variance_ptr[2*1] = scipy.special._ufuncs_cxx._export_hypergeom_variance_double +ufunc__hypergeom_variance_ptr[2*1+1]
= ("_hypergeom_variance") +ufunc__hypergeom_variance_data[0] = &ufunc__hypergeom_variance_ptr[2*0] +ufunc__hypergeom_variance_data[1] = &ufunc__hypergeom_variance_ptr[2*1] +_hypergeom_variance = np.PyUFunc_FromFuncAndData(ufunc__hypergeom_variance_loops, ufunc__hypergeom_variance_data, ufunc__hypergeom_variance_types, 2, 3, 1, 0, "_hypergeom_variance", ufunc__hypergeom_variance_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__igam_fac_loops[2] +cdef void *ufunc__igam_fac_ptr[4] +cdef void *ufunc__igam_fac_data[2] +cdef char ufunc__igam_fac_types[6] +cdef char *ufunc__igam_fac_doc = ( + "Internal function, do not use.") +ufunc__igam_fac_loops[0] = loop_d_dd__As_ff_f +ufunc__igam_fac_loops[1] = loop_d_dd__As_dd_d +ufunc__igam_fac_types[0] = NPY_FLOAT +ufunc__igam_fac_types[1] = NPY_FLOAT +ufunc__igam_fac_types[2] = NPY_FLOAT +ufunc__igam_fac_types[3] = NPY_DOUBLE +ufunc__igam_fac_types[4] = NPY_DOUBLE +ufunc__igam_fac_types[5] = NPY_DOUBLE +ufunc__igam_fac_ptr[2*0] = _func_cephes_igam_fac +ufunc__igam_fac_ptr[2*0+1] = ("_igam_fac") +ufunc__igam_fac_ptr[2*1] = _func_cephes_igam_fac +ufunc__igam_fac_ptr[2*1+1] = ("_igam_fac") +ufunc__igam_fac_data[0] = &ufunc__igam_fac_ptr[2*0] +ufunc__igam_fac_data[1] = &ufunc__igam_fac_ptr[2*1] +_igam_fac = np.PyUFunc_FromFuncAndData(ufunc__igam_fac_loops, ufunc__igam_fac_data, ufunc__igam_fac_types, 2, 2, 1, 0, "_igam_fac", ufunc__igam_fac_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__invgauss_isf_loops[2] +cdef void *ufunc__invgauss_isf_ptr[4] +cdef void *ufunc__invgauss_isf_data[2] +cdef char ufunc__invgauss_isf_types[8] +cdef char *ufunc__invgauss_isf_doc = ( + "_invgauss_isf(x, mu, s)\n" + "\n" + "Inverse survival function of inverse Gaussian distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "mu : array_like\n" + " Positive, real-valued parameter\n" + "s : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__invgauss_isf_loops[0] = loop_f_fff__As_fff_f +ufunc__invgauss_isf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__invgauss_isf_types[0] = NPY_FLOAT +ufunc__invgauss_isf_types[1] = NPY_FLOAT +ufunc__invgauss_isf_types[2] = NPY_FLOAT +ufunc__invgauss_isf_types[3] = NPY_FLOAT +ufunc__invgauss_isf_types[4] = NPY_DOUBLE +ufunc__invgauss_isf_types[5] = NPY_DOUBLE +ufunc__invgauss_isf_types[6] = NPY_DOUBLE +ufunc__invgauss_isf_types[7] = NPY_DOUBLE +ufunc__invgauss_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_invgauss_isf_float +ufunc__invgauss_isf_ptr[2*0+1] = ("_invgauss_isf") +ufunc__invgauss_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_invgauss_isf_double +ufunc__invgauss_isf_ptr[2*1+1] = ("_invgauss_isf") +ufunc__invgauss_isf_data[0] = &ufunc__invgauss_isf_ptr[2*0] +ufunc__invgauss_isf_data[1] = &ufunc__invgauss_isf_ptr[2*1] +_invgauss_isf = np.PyUFunc_FromFuncAndData(ufunc__invgauss_isf_loops, ufunc__invgauss_isf_data, ufunc__invgauss_isf_types, 2, 3, 1, 0, "_invgauss_isf", ufunc__invgauss_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__invgauss_ppf_loops[2] +cdef void *ufunc__invgauss_ppf_ptr[4] +cdef void *ufunc__invgauss_ppf_data[2] +cdef char ufunc__invgauss_ppf_types[8] +cdef char *ufunc__invgauss_ppf_doc = ( + "_invgauss_ppf(x, mu, s)\n" + "\n" + "Percent point function of inverse Gaussian distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "mu : array_like\n" + " Positive, real-valued parameter\n" + "s : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray")
+ufunc__invgauss_ppf_loops[0] = loop_f_fff__As_fff_f +ufunc__invgauss_ppf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__invgauss_ppf_types[0] = NPY_FLOAT +ufunc__invgauss_ppf_types[1] = NPY_FLOAT +ufunc__invgauss_ppf_types[2] = NPY_FLOAT +ufunc__invgauss_ppf_types[3] = NPY_FLOAT +ufunc__invgauss_ppf_types[4] = NPY_DOUBLE +ufunc__invgauss_ppf_types[5] = NPY_DOUBLE +ufunc__invgauss_ppf_types[6] = NPY_DOUBLE +ufunc__invgauss_ppf_types[7] = NPY_DOUBLE +ufunc__invgauss_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_invgauss_ppf_float +ufunc__invgauss_ppf_ptr[2*0+1] = ("_invgauss_ppf") +ufunc__invgauss_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_invgauss_ppf_double +ufunc__invgauss_ppf_ptr[2*1+1] = ("_invgauss_ppf") +ufunc__invgauss_ppf_data[0] = &ufunc__invgauss_ppf_ptr[2*0] +ufunc__invgauss_ppf_data[1] = &ufunc__invgauss_ppf_ptr[2*1] +_invgauss_ppf = np.PyUFunc_FromFuncAndData(ufunc__invgauss_ppf_loops, ufunc__invgauss_ppf_data, ufunc__invgauss_ppf_types, 2, 3, 1, 0, "_invgauss_ppf", ufunc__invgauss_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__kolmogc_loops[2] +cdef void *ufunc__kolmogc_ptr[4] +cdef void *ufunc__kolmogc_data[2] +cdef char ufunc__kolmogc_types[4] +cdef char *ufunc__kolmogc_doc = ( + "Internal function, do not use.") +ufunc__kolmogc_loops[0] = loop_d_d__As_f_f +ufunc__kolmogc_loops[1] = loop_d_d__As_d_d +ufunc__kolmogc_types[0] = NPY_FLOAT +ufunc__kolmogc_types[1] = NPY_FLOAT +ufunc__kolmogc_types[2] = NPY_DOUBLE +ufunc__kolmogc_types[3] = NPY_DOUBLE +ufunc__kolmogc_ptr[2*0] = _func_cephes_kolmogc +ufunc__kolmogc_ptr[2*0+1] = ("_kolmogc") +ufunc__kolmogc_ptr[2*1] = _func_cephes_kolmogc +ufunc__kolmogc_ptr[2*1+1] = ("_kolmogc") +ufunc__kolmogc_data[0] = &ufunc__kolmogc_ptr[2*0] +ufunc__kolmogc_data[1] = &ufunc__kolmogc_ptr[2*1] +_kolmogc = np.PyUFunc_FromFuncAndData(ufunc__kolmogc_loops, ufunc__kolmogc_data, ufunc__kolmogc_types, 2, 1, 1, 0, "_kolmogc", ufunc__kolmogc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__kolmogci_loops[2] +cdef void *ufunc__kolmogci_ptr[4] +cdef void *ufunc__kolmogci_data[2] +cdef char ufunc__kolmogci_types[4] +cdef char *ufunc__kolmogci_doc = ( + "Internal function, do not use.") +ufunc__kolmogci_loops[0] = loop_d_d__As_f_f +ufunc__kolmogci_loops[1] = loop_d_d__As_d_d +ufunc__kolmogci_types[0] = NPY_FLOAT +ufunc__kolmogci_types[1] = NPY_FLOAT +ufunc__kolmogci_types[2] = NPY_DOUBLE +ufunc__kolmogci_types[3] = NPY_DOUBLE +ufunc__kolmogci_ptr[2*0] = _func_cephes_kolmogci +ufunc__kolmogci_ptr[2*0+1] = ("_kolmogci") +ufunc__kolmogci_ptr[2*1] = _func_cephes_kolmogci +ufunc__kolmogci_ptr[2*1+1] = ("_kolmogci") +ufunc__kolmogci_data[0] = &ufunc__kolmogci_ptr[2*0] +ufunc__kolmogci_data[1] = &ufunc__kolmogci_ptr[2*1] +_kolmogci = np.PyUFunc_FromFuncAndData(ufunc__kolmogci_loops, ufunc__kolmogci_data, ufunc__kolmogci_types, 2, 1, 1, 0, "_kolmogci", ufunc__kolmogci_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__kolmogp_loops[2] +cdef void *ufunc__kolmogp_ptr[4] +cdef void *ufunc__kolmogp_data[2] +cdef char ufunc__kolmogp_types[4] +cdef char *ufunc__kolmogp_doc = ( + "Internal function, do not use.") +ufunc__kolmogp_loops[0] = loop_d_d__As_f_f +ufunc__kolmogp_loops[1] = loop_d_d__As_d_d +ufunc__kolmogp_types[0] = NPY_FLOAT +ufunc__kolmogp_types[1] = NPY_FLOAT +ufunc__kolmogp_types[2] = NPY_DOUBLE +ufunc__kolmogp_types[3] = NPY_DOUBLE +ufunc__kolmogp_ptr[2*0] = _func_cephes_kolmogp +ufunc__kolmogp_ptr[2*0+1] = ("_kolmogp") +ufunc__kolmogp_ptr[2*1] = _func_cephes_kolmogp +ufunc__kolmogp_ptr[2*1+1] = ("_kolmogp") +ufunc__kolmogp_data[0] = 
&ufunc__kolmogp_ptr[2*0] +ufunc__kolmogp_data[1] = &ufunc__kolmogp_ptr[2*1] +_kolmogp = np.PyUFunc_FromFuncAndData(ufunc__kolmogp_loops, ufunc__kolmogp_data, ufunc__kolmogp_types, 2, 1, 1, 0, "_kolmogp", ufunc__kolmogp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__lanczos_sum_expg_scaled_loops[2] +cdef void *ufunc__lanczos_sum_expg_scaled_ptr[4] +cdef void *ufunc__lanczos_sum_expg_scaled_data[2] +cdef char ufunc__lanczos_sum_expg_scaled_types[4] +cdef char *ufunc__lanczos_sum_expg_scaled_doc = ( + "Internal function, do not use.") +ufunc__lanczos_sum_expg_scaled_loops[0] = loop_d_d__As_f_f +ufunc__lanczos_sum_expg_scaled_loops[1] = loop_d_d__As_d_d +ufunc__lanczos_sum_expg_scaled_types[0] = NPY_FLOAT +ufunc__lanczos_sum_expg_scaled_types[1] = NPY_FLOAT +ufunc__lanczos_sum_expg_scaled_types[2] = NPY_DOUBLE +ufunc__lanczos_sum_expg_scaled_types[3] = NPY_DOUBLE +ufunc__lanczos_sum_expg_scaled_ptr[2*0] = _func_cephes_lanczos_sum_expg_scaled +ufunc__lanczos_sum_expg_scaled_ptr[2*0+1] = ("_lanczos_sum_expg_scaled") +ufunc__lanczos_sum_expg_scaled_ptr[2*1] = _func_cephes_lanczos_sum_expg_scaled +ufunc__lanczos_sum_expg_scaled_ptr[2*1+1] = ("_lanczos_sum_expg_scaled") +ufunc__lanczos_sum_expg_scaled_data[0] = &ufunc__lanczos_sum_expg_scaled_ptr[2*0] +ufunc__lanczos_sum_expg_scaled_data[1] = &ufunc__lanczos_sum_expg_scaled_ptr[2*1] +_lanczos_sum_expg_scaled = np.PyUFunc_FromFuncAndData(ufunc__lanczos_sum_expg_scaled_loops, ufunc__lanczos_sum_expg_scaled_data, ufunc__lanczos_sum_expg_scaled_types, 2, 1, 1, 0, "_lanczos_sum_expg_scaled", ufunc__lanczos_sum_expg_scaled_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__lgam1p_loops[2] +cdef void *ufunc__lgam1p_ptr[4] +cdef void *ufunc__lgam1p_data[2] +cdef char ufunc__lgam1p_types[4] +cdef char *ufunc__lgam1p_doc = ( + "Internal function, do not use.") +ufunc__lgam1p_loops[0] = loop_d_d__As_f_f +ufunc__lgam1p_loops[1] = loop_d_d__As_d_d +ufunc__lgam1p_types[0] = NPY_FLOAT +ufunc__lgam1p_types[1] = NPY_FLOAT +ufunc__lgam1p_types[2] = NPY_DOUBLE +ufunc__lgam1p_types[3] = NPY_DOUBLE +ufunc__lgam1p_ptr[2*0] = _func_cephes_lgam1p +ufunc__lgam1p_ptr[2*0+1] = ("_lgam1p") +ufunc__lgam1p_ptr[2*1] = _func_cephes_lgam1p +ufunc__lgam1p_ptr[2*1+1] = ("_lgam1p") +ufunc__lgam1p_data[0] = &ufunc__lgam1p_ptr[2*0] +ufunc__lgam1p_data[1] = &ufunc__lgam1p_ptr[2*1] +_lgam1p = np.PyUFunc_FromFuncAndData(ufunc__lgam1p_loops, ufunc__lgam1p_data, ufunc__lgam1p_types, 2, 1, 1, 0, "_lgam1p", ufunc__lgam1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__log1pmx_loops[2] +cdef void *ufunc__log1pmx_ptr[4] +cdef void *ufunc__log1pmx_data[2] +cdef char ufunc__log1pmx_types[4] +cdef char *ufunc__log1pmx_doc = ( + "Internal function, do not use.") +ufunc__log1pmx_loops[0] = loop_d_d__As_f_f +ufunc__log1pmx_loops[1] = loop_d_d__As_d_d +ufunc__log1pmx_types[0] = NPY_FLOAT +ufunc__log1pmx_types[1] = NPY_FLOAT +ufunc__log1pmx_types[2] = NPY_DOUBLE +ufunc__log1pmx_types[3] = NPY_DOUBLE +ufunc__log1pmx_ptr[2*0] = _func_cephes_log1pmx +ufunc__log1pmx_ptr[2*0+1] = ("_log1pmx") +ufunc__log1pmx_ptr[2*1] = _func_cephes_log1pmx +ufunc__log1pmx_ptr[2*1+1] = ("_log1pmx") +ufunc__log1pmx_data[0] = &ufunc__log1pmx_ptr[2*0] +ufunc__log1pmx_data[1] = &ufunc__log1pmx_ptr[2*1] +_log1pmx = np.PyUFunc_FromFuncAndData(ufunc__log1pmx_loops, ufunc__log1pmx_data, ufunc__log1pmx_types, 2, 1, 1, 0, "_log1pmx", ufunc__log1pmx_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_cdf_loops[2] +cdef void *ufunc__nbinom_cdf_ptr[4] +cdef void *ufunc__nbinom_cdf_data[2] +cdef char ufunc__nbinom_cdf_types[8] +cdef 
char *ufunc__nbinom_cdf_doc = ( + "_nbinom_cdf(x, r, p)\n" + "\n" + "Cumulative distribution function of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_cdf_loops[0] = loop_f_fff__As_fff_f +ufunc__nbinom_cdf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nbinom_cdf_types[0] = NPY_FLOAT +ufunc__nbinom_cdf_types[1] = NPY_FLOAT +ufunc__nbinom_cdf_types[2] = NPY_FLOAT +ufunc__nbinom_cdf_types[3] = NPY_FLOAT +ufunc__nbinom_cdf_types[4] = NPY_DOUBLE +ufunc__nbinom_cdf_types[5] = NPY_DOUBLE +ufunc__nbinom_cdf_types[6] = NPY_DOUBLE +ufunc__nbinom_cdf_types[7] = NPY_DOUBLE +ufunc__nbinom_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_cdf_float +ufunc__nbinom_cdf_ptr[2*0+1] = ("_nbinom_cdf") +ufunc__nbinom_cdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_cdf_double +ufunc__nbinom_cdf_ptr[2*1+1] = ("_nbinom_cdf") +ufunc__nbinom_cdf_data[0] = &ufunc__nbinom_cdf_ptr[2*0] +ufunc__nbinom_cdf_data[1] = &ufunc__nbinom_cdf_ptr[2*1] +_nbinom_cdf = np.PyUFunc_FromFuncAndData(ufunc__nbinom_cdf_loops, ufunc__nbinom_cdf_data, ufunc__nbinom_cdf_types, 2, 3, 1, 0, "_nbinom_cdf", ufunc__nbinom_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_isf_loops[2] +cdef void *ufunc__nbinom_isf_ptr[4] +cdef void *ufunc__nbinom_isf_data[2] +cdef char ufunc__nbinom_isf_types[8] +cdef char *ufunc__nbinom_isf_doc = ( + "_nbinom_isf(x, r, p)\n" + "\n" + "Inverse survival function of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_isf_loops[0] = loop_f_fff__As_fff_f +ufunc__nbinom_isf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nbinom_isf_types[0] = NPY_FLOAT +ufunc__nbinom_isf_types[1] = NPY_FLOAT +ufunc__nbinom_isf_types[2] = NPY_FLOAT +ufunc__nbinom_isf_types[3] = NPY_FLOAT +ufunc__nbinom_isf_types[4] = NPY_DOUBLE +ufunc__nbinom_isf_types[5] = NPY_DOUBLE +ufunc__nbinom_isf_types[6] = NPY_DOUBLE +ufunc__nbinom_isf_types[7] = NPY_DOUBLE +ufunc__nbinom_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_isf_float +ufunc__nbinom_isf_ptr[2*0+1] = ("_nbinom_isf") +ufunc__nbinom_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_isf_double +ufunc__nbinom_isf_ptr[2*1+1] = ("_nbinom_isf") +ufunc__nbinom_isf_data[0] = &ufunc__nbinom_isf_ptr[2*0] +ufunc__nbinom_isf_data[1] = &ufunc__nbinom_isf_ptr[2*1] +_nbinom_isf = np.PyUFunc_FromFuncAndData(ufunc__nbinom_isf_loops, ufunc__nbinom_isf_data, ufunc__nbinom_isf_types, 2, 3, 1, 0, "_nbinom_isf", ufunc__nbinom_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_kurtosis_excess_loops[2] +cdef void *ufunc__nbinom_kurtosis_excess_ptr[4] +cdef void *ufunc__nbinom_kurtosis_excess_data[2] +cdef char ufunc__nbinom_kurtosis_excess_types[6] +cdef char *ufunc__nbinom_kurtosis_excess_doc = ( + "_nbinom_kurtosis_excess(r, p)\n" + "\n" + "Kurtosis excess of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_kurtosis_excess_loops[0] =
loop_f_ff__As_ff_f +ufunc__nbinom_kurtosis_excess_loops[1] = loop_d_dd__As_dd_d +ufunc__nbinom_kurtosis_excess_types[0] = NPY_FLOAT +ufunc__nbinom_kurtosis_excess_types[1] = NPY_FLOAT +ufunc__nbinom_kurtosis_excess_types[2] = NPY_FLOAT +ufunc__nbinom_kurtosis_excess_types[3] = NPY_DOUBLE +ufunc__nbinom_kurtosis_excess_types[4] = NPY_DOUBLE +ufunc__nbinom_kurtosis_excess_types[5] = NPY_DOUBLE +ufunc__nbinom_kurtosis_excess_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_kurtosis_excess_float +ufunc__nbinom_kurtosis_excess_ptr[2*0+1] = ("_nbinom_kurtosis_excess") +ufunc__nbinom_kurtosis_excess_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_kurtosis_excess_double +ufunc__nbinom_kurtosis_excess_ptr[2*1+1] = ("_nbinom_kurtosis_excess") +ufunc__nbinom_kurtosis_excess_data[0] = &ufunc__nbinom_kurtosis_excess_ptr[2*0] +ufunc__nbinom_kurtosis_excess_data[1] = &ufunc__nbinom_kurtosis_excess_ptr[2*1] +_nbinom_kurtosis_excess = np.PyUFunc_FromFuncAndData(ufunc__nbinom_kurtosis_excess_loops, ufunc__nbinom_kurtosis_excess_data, ufunc__nbinom_kurtosis_excess_types, 2, 2, 1, 0, "_nbinom_kurtosis_excess", ufunc__nbinom_kurtosis_excess_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_mean_loops[2] +cdef void *ufunc__nbinom_mean_ptr[4] +cdef void *ufunc__nbinom_mean_data[2] +cdef char ufunc__nbinom_mean_types[6] +cdef char *ufunc__nbinom_mean_doc = ( + "_nbinom_mean(r, p)\n" + "\n" + "Mean of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_mean_loops[0] = loop_f_ff__As_ff_f +ufunc__nbinom_mean_loops[1] = loop_d_dd__As_dd_d +ufunc__nbinom_mean_types[0] = NPY_FLOAT +ufunc__nbinom_mean_types[1] = NPY_FLOAT +ufunc__nbinom_mean_types[2] = NPY_FLOAT +ufunc__nbinom_mean_types[3] = NPY_DOUBLE +ufunc__nbinom_mean_types[4] = NPY_DOUBLE +ufunc__nbinom_mean_types[5] = NPY_DOUBLE +ufunc__nbinom_mean_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_mean_float +ufunc__nbinom_mean_ptr[2*0+1] = ("_nbinom_mean") +ufunc__nbinom_mean_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_mean_double +ufunc__nbinom_mean_ptr[2*1+1] = ("_nbinom_mean") +ufunc__nbinom_mean_data[0] = &ufunc__nbinom_mean_ptr[2*0] +ufunc__nbinom_mean_data[1] = &ufunc__nbinom_mean_ptr[2*1] +_nbinom_mean = np.PyUFunc_FromFuncAndData(ufunc__nbinom_mean_loops, ufunc__nbinom_mean_data, ufunc__nbinom_mean_types, 2, 2, 1, 0, "_nbinom_mean", ufunc__nbinom_mean_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_pmf_loops[2] +cdef void *ufunc__nbinom_pmf_ptr[4] +cdef void *ufunc__nbinom_pmf_data[2] +cdef char ufunc__nbinom_pmf_types[8] +cdef char *ufunc__nbinom_pmf_doc = ( + "_nbinom_pmf(x, r, p)\n" + "\n" + "Probability mass function of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_pmf_loops[0] = loop_f_fff__As_fff_f +ufunc__nbinom_pmf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nbinom_pmf_types[0] = NPY_FLOAT +ufunc__nbinom_pmf_types[1] = NPY_FLOAT +ufunc__nbinom_pmf_types[2] = NPY_FLOAT +ufunc__nbinom_pmf_types[3] = NPY_FLOAT +ufunc__nbinom_pmf_types[4] = NPY_DOUBLE +ufunc__nbinom_pmf_types[5] = NPY_DOUBLE +ufunc__nbinom_pmf_types[6] = NPY_DOUBLE 
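# Illustrative check (assumes the private scipy.special._ufuncs import path
# and SciPy's "failures before the r-th success" convention, under which the
# negative binomial mean is r*(1-p)/p):
from scipy.special import _ufuncs

r, p = 5.0, 0.3
print(_ufuncs._nbinom_mean(r, p))   # expected r*(1-p)/p
print(r * (1 - p) / p)              # 11.666...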
+ufunc__nbinom_pmf_types[7] = NPY_DOUBLE +ufunc__nbinom_pmf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_pmf_float +ufunc__nbinom_pmf_ptr[2*0+1] = ("_nbinom_pmf") +ufunc__nbinom_pmf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_pmf_double +ufunc__nbinom_pmf_ptr[2*1+1] = ("_nbinom_pmf") +ufunc__nbinom_pmf_data[0] = &ufunc__nbinom_pmf_ptr[2*0] +ufunc__nbinom_pmf_data[1] = &ufunc__nbinom_pmf_ptr[2*1] +_nbinom_pmf = np.PyUFunc_FromFuncAndData(ufunc__nbinom_pmf_loops, ufunc__nbinom_pmf_data, ufunc__nbinom_pmf_types, 2, 3, 1, 0, "_nbinom_pmf", ufunc__nbinom_pmf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_ppf_loops[2] +cdef void *ufunc__nbinom_ppf_ptr[4] +cdef void *ufunc__nbinom_ppf_data[2] +cdef char ufunc__nbinom_ppf_types[8] +cdef char *ufunc__nbinom_ppf_doc = ( + "_nbinom_ppf(x, r, p)\n" + "\n" + "Percent point function of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_ppf_loops[0] = loop_f_fff__As_fff_f +ufunc__nbinom_ppf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nbinom_ppf_types[0] = NPY_FLOAT +ufunc__nbinom_ppf_types[1] = NPY_FLOAT +ufunc__nbinom_ppf_types[2] = NPY_FLOAT +ufunc__nbinom_ppf_types[3] = NPY_FLOAT +ufunc__nbinom_ppf_types[4] = NPY_DOUBLE +ufunc__nbinom_ppf_types[5] = NPY_DOUBLE +ufunc__nbinom_ppf_types[6] = NPY_DOUBLE +ufunc__nbinom_ppf_types[7] = NPY_DOUBLE +ufunc__nbinom_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_ppf_float +ufunc__nbinom_ppf_ptr[2*0+1] = ("_nbinom_ppf") +ufunc__nbinom_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_ppf_double +ufunc__nbinom_ppf_ptr[2*1+1] = ("_nbinom_ppf") +ufunc__nbinom_ppf_data[0] = &ufunc__nbinom_ppf_ptr[2*0] +ufunc__nbinom_ppf_data[1] = &ufunc__nbinom_ppf_ptr[2*1] +_nbinom_ppf = np.PyUFunc_FromFuncAndData(ufunc__nbinom_ppf_loops, ufunc__nbinom_ppf_data, ufunc__nbinom_ppf_types, 2, 3, 1, 0, "_nbinom_ppf", ufunc__nbinom_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_sf_loops[2] +cdef void *ufunc__nbinom_sf_ptr[4] +cdef void *ufunc__nbinom_sf_data[2] +cdef char ufunc__nbinom_sf_types[8] +cdef char *ufunc__nbinom_sf_doc = ( + "_nbinom_sf(x, r, p)\n" + "\n" + "Survival function of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_sf_loops[0] = loop_f_fff__As_fff_f +ufunc__nbinom_sf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nbinom_sf_types[0] = NPY_FLOAT +ufunc__nbinom_sf_types[1] = NPY_FLOAT +ufunc__nbinom_sf_types[2] = NPY_FLOAT +ufunc__nbinom_sf_types[3] = NPY_FLOAT +ufunc__nbinom_sf_types[4] = NPY_DOUBLE +ufunc__nbinom_sf_types[5] = NPY_DOUBLE +ufunc__nbinom_sf_types[6] = NPY_DOUBLE +ufunc__nbinom_sf_types[7] = NPY_DOUBLE +ufunc__nbinom_sf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_sf_float +ufunc__nbinom_sf_ptr[2*0+1] = ("_nbinom_sf") +ufunc__nbinom_sf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_sf_double +ufunc__nbinom_sf_ptr[2*1+1] = ("_nbinom_sf") +ufunc__nbinom_sf_data[0] = &ufunc__nbinom_sf_ptr[2*0] +ufunc__nbinom_sf_data[1] = &ufunc__nbinom_sf_ptr[2*1] +_nbinom_sf = np.PyUFunc_FromFuncAndData(ufunc__nbinom_sf_loops, ufunc__nbinom_sf_data, 
ufunc__nbinom_sf_types, 2, 3, 1, 0, "_nbinom_sf", ufunc__nbinom_sf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_skewness_loops[2] +cdef void *ufunc__nbinom_skewness_ptr[4] +cdef void *ufunc__nbinom_skewness_data[2] +cdef char ufunc__nbinom_skewness_types[6] +cdef char *ufunc__nbinom_skewness_doc = ( + "_nbinom_skewness(r, p)\n" + "\n" + "Skewness of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_skewness_loops[0] = loop_f_ff__As_ff_f +ufunc__nbinom_skewness_loops[1] = loop_d_dd__As_dd_d +ufunc__nbinom_skewness_types[0] = NPY_FLOAT +ufunc__nbinom_skewness_types[1] = NPY_FLOAT +ufunc__nbinom_skewness_types[2] = NPY_FLOAT +ufunc__nbinom_skewness_types[3] = NPY_DOUBLE +ufunc__nbinom_skewness_types[4] = NPY_DOUBLE +ufunc__nbinom_skewness_types[5] = NPY_DOUBLE +ufunc__nbinom_skewness_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_skewness_float +ufunc__nbinom_skewness_ptr[2*0+1] = ("_nbinom_skewness") +ufunc__nbinom_skewness_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_skewness_double +ufunc__nbinom_skewness_ptr[2*1+1] = ("_nbinom_skewness") +ufunc__nbinom_skewness_data[0] = &ufunc__nbinom_skewness_ptr[2*0] +ufunc__nbinom_skewness_data[1] = &ufunc__nbinom_skewness_ptr[2*1] +_nbinom_skewness = np.PyUFunc_FromFuncAndData(ufunc__nbinom_skewness_loops, ufunc__nbinom_skewness_data, ufunc__nbinom_skewness_types, 2, 2, 1, 0, "_nbinom_skewness", ufunc__nbinom_skewness_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nbinom_variance_loops[2] +cdef void *ufunc__nbinom_variance_ptr[4] +cdef void *ufunc__nbinom_variance_data[2] +cdef char ufunc__nbinom_variance_types[6] +cdef char *ufunc__nbinom_variance_doc = ( + "_nbinom_variance(r, p)\n" + "\n" + "Variance of negative binomial distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "r : array_like\n" + " Positive, integer-valued parameter\n" + "p : array_like\n" + " Positive, real-valued parameter\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nbinom_variance_loops[0] = loop_f_ff__As_ff_f +ufunc__nbinom_variance_loops[1] = loop_d_dd__As_dd_d +ufunc__nbinom_variance_types[0] = NPY_FLOAT +ufunc__nbinom_variance_types[1] = NPY_FLOAT +ufunc__nbinom_variance_types[2] = NPY_FLOAT +ufunc__nbinom_variance_types[3] = NPY_DOUBLE +ufunc__nbinom_variance_types[4] = NPY_DOUBLE +ufunc__nbinom_variance_types[5] = NPY_DOUBLE +ufunc__nbinom_variance_ptr[2*0] = scipy.special._ufuncs_cxx._export_nbinom_variance_float +ufunc__nbinom_variance_ptr[2*0+1] = ("_nbinom_variance") +ufunc__nbinom_variance_ptr[2*1] = scipy.special._ufuncs_cxx._export_nbinom_variance_double +ufunc__nbinom_variance_ptr[2*1+1] = ("_nbinom_variance") +ufunc__nbinom_variance_data[0] = &ufunc__nbinom_variance_ptr[2*0] +ufunc__nbinom_variance_data[1] = &ufunc__nbinom_variance_ptr[2*1] +_nbinom_variance = np.PyUFunc_FromFuncAndData(ufunc__nbinom_variance_loops, ufunc__nbinom_variance_data, ufunc__nbinom_variance_types, 2, 2, 1, 0, "_nbinom_variance", ufunc__nbinom_variance_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_cdf_loops[2] +cdef void *ufunc__ncf_cdf_ptr[4] +cdef void *ufunc__ncf_cdf_data[2] +cdef char ufunc__ncf_cdf_types[10] +cdef char *ufunc__ncf_cdf_doc = ( + "_ncf_cdf(x, v1, v2, l)\n" + "\n" + "Cumulative distribution function of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + "
Positive real-valued\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_cdf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__ncf_cdf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__ncf_cdf_types[0] = NPY_FLOAT +ufunc__ncf_cdf_types[1] = NPY_FLOAT +ufunc__ncf_cdf_types[2] = NPY_FLOAT +ufunc__ncf_cdf_types[3] = NPY_FLOAT +ufunc__ncf_cdf_types[4] = NPY_FLOAT +ufunc__ncf_cdf_types[5] = NPY_DOUBLE +ufunc__ncf_cdf_types[6] = NPY_DOUBLE +ufunc__ncf_cdf_types[7] = NPY_DOUBLE +ufunc__ncf_cdf_types[8] = NPY_DOUBLE +ufunc__ncf_cdf_types[9] = NPY_DOUBLE +ufunc__ncf_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_cdf_float +ufunc__ncf_cdf_ptr[2*0+1] = ("_ncf_cdf") +ufunc__ncf_cdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_cdf_double +ufunc__ncf_cdf_ptr[2*1+1] = ("_ncf_cdf") +ufunc__ncf_cdf_data[0] = &ufunc__ncf_cdf_ptr[2*0] +ufunc__ncf_cdf_data[1] = &ufunc__ncf_cdf_ptr[2*1] +_ncf_cdf = np.PyUFunc_FromFuncAndData(ufunc__ncf_cdf_loops, ufunc__ncf_cdf_data, ufunc__ncf_cdf_types, 2, 4, 1, 0, "_ncf_cdf", ufunc__ncf_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_isf_loops[2] +cdef void *ufunc__ncf_isf_ptr[4] +cdef void *ufunc__ncf_isf_data[2] +cdef char ufunc__ncf_isf_types[10] +cdef char *ufunc__ncf_isf_doc = ( + "_ncf_isf(x, v1, v2, l)\n" + "\n" + "Inverse survival function of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_isf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__ncf_isf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__ncf_isf_types[0] = NPY_FLOAT +ufunc__ncf_isf_types[1] = NPY_FLOAT +ufunc__ncf_isf_types[2] = NPY_FLOAT +ufunc__ncf_isf_types[3] = NPY_FLOAT +ufunc__ncf_isf_types[4] = NPY_FLOAT +ufunc__ncf_isf_types[5] = NPY_DOUBLE +ufunc__ncf_isf_types[6] = NPY_DOUBLE +ufunc__ncf_isf_types[7] = NPY_DOUBLE +ufunc__ncf_isf_types[8] = NPY_DOUBLE +ufunc__ncf_isf_types[9] = NPY_DOUBLE +ufunc__ncf_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_isf_float +ufunc__ncf_isf_ptr[2*0+1] = ("_ncf_isf") +ufunc__ncf_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_isf_double +ufunc__ncf_isf_ptr[2*1+1] = ("_ncf_isf") +ufunc__ncf_isf_data[0] = &ufunc__ncf_isf_ptr[2*0] +ufunc__ncf_isf_data[1] = &ufunc__ncf_isf_ptr[2*1] +_ncf_isf = np.PyUFunc_FromFuncAndData(ufunc__ncf_isf_loops, ufunc__ncf_isf_data, ufunc__ncf_isf_types, 2, 4, 1, 0, "_ncf_isf", ufunc__ncf_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_kurtosis_excess_loops[2] +cdef void *ufunc__ncf_kurtosis_excess_ptr[4] +cdef void *ufunc__ncf_kurtosis_excess_data[2] +cdef char ufunc__ncf_kurtosis_excess_types[8] +cdef char *ufunc__ncf_kurtosis_excess_doc = ( + "_ncf_kurtosis_excess(v1, v2, l)\n" + "\n" + "Kurtosis excess of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_kurtosis_excess_loops[0] = loop_f_fff__As_fff_f +ufunc__ncf_kurtosis_excess_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncf_kurtosis_excess_types[0] = NPY_FLOAT +ufunc__ncf_kurtosis_excess_types[1] = NPY_FLOAT +ufunc__ncf_kurtosis_excess_types[2] = NPY_FLOAT +ufunc__ncf_kurtosis_excess_types[3] = NPY_FLOAT +ufunc__ncf_kurtosis_excess_types[4] = NPY_DOUBLE +ufunc__ncf_kurtosis_excess_types[5] = NPY_DOUBLE
+ufunc__ncf_kurtosis_excess_types[6] = NPY_DOUBLE +ufunc__ncf_kurtosis_excess_types[7] = NPY_DOUBLE +ufunc__ncf_kurtosis_excess_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_kurtosis_excess_float +ufunc__ncf_kurtosis_excess_ptr[2*0+1] = ("_ncf_kurtosis_excess") +ufunc__ncf_kurtosis_excess_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_kurtosis_excess_double +ufunc__ncf_kurtosis_excess_ptr[2*1+1] = ("_ncf_kurtosis_excess") +ufunc__ncf_kurtosis_excess_data[0] = &ufunc__ncf_kurtosis_excess_ptr[2*0] +ufunc__ncf_kurtosis_excess_data[1] = &ufunc__ncf_kurtosis_excess_ptr[2*1] +_ncf_kurtosis_excess = np.PyUFunc_FromFuncAndData(ufunc__ncf_kurtosis_excess_loops, ufunc__ncf_kurtosis_excess_data, ufunc__ncf_kurtosis_excess_types, 2, 3, 1, 0, "_ncf_kurtosis_excess", ufunc__ncf_kurtosis_excess_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_mean_loops[2] +cdef void *ufunc__ncf_mean_ptr[4] +cdef void *ufunc__ncf_mean_data[2] +cdef char ufunc__ncf_mean_types[8] +cdef char *ufunc__ncf_mean_doc = ( + "_ncf_mean(v1, v2, l)\n" + "\n" + "Mean of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_mean_loops[0] = loop_f_fff__As_fff_f +ufunc__ncf_mean_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncf_mean_types[0] = NPY_FLOAT +ufunc__ncf_mean_types[1] = NPY_FLOAT +ufunc__ncf_mean_types[2] = NPY_FLOAT +ufunc__ncf_mean_types[3] = NPY_FLOAT +ufunc__ncf_mean_types[4] = NPY_DOUBLE +ufunc__ncf_mean_types[5] = NPY_DOUBLE +ufunc__ncf_mean_types[6] = NPY_DOUBLE +ufunc__ncf_mean_types[7] = NPY_DOUBLE +ufunc__ncf_mean_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_mean_float +ufunc__ncf_mean_ptr[2*0+1] = ("_ncf_mean") +ufunc__ncf_mean_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_mean_double +ufunc__ncf_mean_ptr[2*1+1] = ("_ncf_mean") +ufunc__ncf_mean_data[0] = &ufunc__ncf_mean_ptr[2*0] +ufunc__ncf_mean_data[1] = &ufunc__ncf_mean_ptr[2*1] +_ncf_mean = np.PyUFunc_FromFuncAndData(ufunc__ncf_mean_loops, ufunc__ncf_mean_data, ufunc__ncf_mean_types, 2, 3, 1, 0, "_ncf_mean", ufunc__ncf_mean_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_pdf_loops[2] +cdef void *ufunc__ncf_pdf_ptr[4] +cdef void *ufunc__ncf_pdf_data[2] +cdef char ufunc__ncf_pdf_types[10] +cdef char *ufunc__ncf_pdf_doc = ( + "_ncf_pdf(x, v1, v2, l)\n" + "\n" + "Probability density function of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_pdf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__ncf_pdf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__ncf_pdf_types[0] = NPY_FLOAT +ufunc__ncf_pdf_types[1] = NPY_FLOAT +ufunc__ncf_pdf_types[2] = NPY_FLOAT +ufunc__ncf_pdf_types[3] = NPY_FLOAT +ufunc__ncf_pdf_types[4] = NPY_FLOAT +ufunc__ncf_pdf_types[5] = NPY_DOUBLE +ufunc__ncf_pdf_types[6] = NPY_DOUBLE +ufunc__ncf_pdf_types[7] = NPY_DOUBLE +ufunc__ncf_pdf_types[8] = NPY_DOUBLE +ufunc__ncf_pdf_types[9] = NPY_DOUBLE +ufunc__ncf_pdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_pdf_float +ufunc__ncf_pdf_ptr[2*0+1] = ("_ncf_pdf") +ufunc__ncf_pdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_pdf_double +ufunc__ncf_pdf_ptr[2*1+1] = ("_ncf_pdf") +ufunc__ncf_pdf_data[0] = &ufunc__ncf_pdf_ptr[2*0] +ufunc__ncf_pdf_data[1] = &ufunc__ncf_pdf_ptr[2*1] +_ncf_pdf = np.PyUFunc_FromFuncAndData(ufunc__ncf_pdf_loops, 
ufunc__ncf_pdf_data, ufunc__ncf_pdf_types, 2, 4, 1, 0, "_ncf_pdf", ufunc__ncf_pdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_ppf_loops[2] +cdef void *ufunc__ncf_ppf_ptr[4] +cdef void *ufunc__ncf_ppf_data[2] +cdef char ufunc__ncf_ppf_types[10] +cdef char *ufunc__ncf_ppf_doc = ( + "_ncf_ppf(x, v1, v2, l)\n" + "\n" + "Percent point function of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_ppf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__ncf_ppf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__ncf_ppf_types[0] = NPY_FLOAT +ufunc__ncf_ppf_types[1] = NPY_FLOAT +ufunc__ncf_ppf_types[2] = NPY_FLOAT +ufunc__ncf_ppf_types[3] = NPY_FLOAT +ufunc__ncf_ppf_types[4] = NPY_FLOAT +ufunc__ncf_ppf_types[5] = NPY_DOUBLE +ufunc__ncf_ppf_types[6] = NPY_DOUBLE +ufunc__ncf_ppf_types[7] = NPY_DOUBLE +ufunc__ncf_ppf_types[8] = NPY_DOUBLE +ufunc__ncf_ppf_types[9] = NPY_DOUBLE +ufunc__ncf_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_ppf_float +ufunc__ncf_ppf_ptr[2*0+1] = ("_ncf_ppf") +ufunc__ncf_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_ppf_double +ufunc__ncf_ppf_ptr[2*1+1] = ("_ncf_ppf") +ufunc__ncf_ppf_data[0] = &ufunc__ncf_ppf_ptr[2*0] +ufunc__ncf_ppf_data[1] = &ufunc__ncf_ppf_ptr[2*1] +_ncf_ppf = np.PyUFunc_FromFuncAndData(ufunc__ncf_ppf_loops, ufunc__ncf_ppf_data, ufunc__ncf_ppf_types, 2, 4, 1, 0, "_ncf_ppf", ufunc__ncf_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_sf_loops[2] +cdef void *ufunc__ncf_sf_ptr[4] +cdef void *ufunc__ncf_sf_data[2] +cdef char ufunc__ncf_sf_types[10] +cdef char *ufunc__ncf_sf_doc = ( + "_ncf_sf(x, v1, v2, l)\n" + "\n" + "Survival function of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_sf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__ncf_sf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__ncf_sf_types[0] = NPY_FLOAT +ufunc__ncf_sf_types[1] = NPY_FLOAT +ufunc__ncf_sf_types[2] = NPY_FLOAT +ufunc__ncf_sf_types[3] = NPY_FLOAT +ufunc__ncf_sf_types[4] = NPY_FLOAT +ufunc__ncf_sf_types[5] = NPY_DOUBLE +ufunc__ncf_sf_types[6] = NPY_DOUBLE +ufunc__ncf_sf_types[7] = NPY_DOUBLE +ufunc__ncf_sf_types[8] = NPY_DOUBLE +ufunc__ncf_sf_types[9] = NPY_DOUBLE +ufunc__ncf_sf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_sf_float +ufunc__ncf_sf_ptr[2*0+1] = ("_ncf_sf") +ufunc__ncf_sf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_sf_double +ufunc__ncf_sf_ptr[2*1+1] = ("_ncf_sf") +ufunc__ncf_sf_data[0] = &ufunc__ncf_sf_ptr[2*0] +ufunc__ncf_sf_data[1] = &ufunc__ncf_sf_ptr[2*1] +_ncf_sf = np.PyUFunc_FromFuncAndData(ufunc__ncf_sf_loops, ufunc__ncf_sf_data, ufunc__ncf_sf_types, 2, 4, 1, 0, "_ncf_sf", ufunc__ncf_sf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_skewness_loops[2] +cdef void *ufunc__ncf_skewness_ptr[4] +cdef void *ufunc__ncf_skewness_data[2] +cdef char ufunc__ncf_skewness_types[8] +cdef char *ufunc__ncf_skewness_doc = ( + "_ncf_skewness(v1, v2, l)\n" + "\n" + "Skewness of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_skewness_loops[0] = loop_f_fff__As_fff_f +ufunc__ncf_skewness_loops[1] = 
loop_d_ddd__As_ddd_d +ufunc__ncf_skewness_types[0] = NPY_FLOAT +ufunc__ncf_skewness_types[1] = NPY_FLOAT +ufunc__ncf_skewness_types[2] = NPY_FLOAT +ufunc__ncf_skewness_types[3] = NPY_FLOAT +ufunc__ncf_skewness_types[4] = NPY_DOUBLE +ufunc__ncf_skewness_types[5] = NPY_DOUBLE +ufunc__ncf_skewness_types[6] = NPY_DOUBLE +ufunc__ncf_skewness_types[7] = NPY_DOUBLE +ufunc__ncf_skewness_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_skewness_float +ufunc__ncf_skewness_ptr[2*0+1] = ("_ncf_skewness") +ufunc__ncf_skewness_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_skewness_double +ufunc__ncf_skewness_ptr[2*1+1] = ("_ncf_skewness") +ufunc__ncf_skewness_data[0] = &ufunc__ncf_skewness_ptr[2*0] +ufunc__ncf_skewness_data[1] = &ufunc__ncf_skewness_ptr[2*1] +_ncf_skewness = np.PyUFunc_FromFuncAndData(ufunc__ncf_skewness_loops, ufunc__ncf_skewness_data, ufunc__ncf_skewness_types, 2, 3, 1, 0, "_ncf_skewness", ufunc__ncf_skewness_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncf_variance_loops[2] +cdef void *ufunc__ncf_variance_ptr[4] +cdef void *ufunc__ncf_variance_data[2] +cdef char ufunc__ncf_variance_types[8] +cdef char *ufunc__ncf_variance_doc = ( + "_ncf_variance(v1, v2, l)\n" + "\n" + "Variance of noncentral F-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v1, v2, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncf_variance_loops[0] = loop_f_fff__As_fff_f +ufunc__ncf_variance_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncf_variance_types[0] = NPY_FLOAT +ufunc__ncf_variance_types[1] = NPY_FLOAT +ufunc__ncf_variance_types[2] = NPY_FLOAT +ufunc__ncf_variance_types[3] = NPY_FLOAT +ufunc__ncf_variance_types[4] = NPY_DOUBLE +ufunc__ncf_variance_types[5] = NPY_DOUBLE +ufunc__ncf_variance_types[6] = NPY_DOUBLE +ufunc__ncf_variance_types[7] = NPY_DOUBLE +ufunc__ncf_variance_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncf_variance_float +ufunc__ncf_variance_ptr[2*0+1] = ("_ncf_variance") +ufunc__ncf_variance_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncf_variance_double +ufunc__ncf_variance_ptr[2*1+1] = ("_ncf_variance") +ufunc__ncf_variance_data[0] = &ufunc__ncf_variance_ptr[2*0] +ufunc__ncf_variance_data[1] = &ufunc__ncf_variance_ptr[2*1] +_ncf_variance = np.PyUFunc_FromFuncAndData(ufunc__ncf_variance_loops, ufunc__ncf_variance_data, ufunc__ncf_variance_types, 2, 3, 1, 0, "_ncf_variance", ufunc__ncf_variance_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_cdf_loops[2] +cdef void *ufunc__nct_cdf_ptr[4] +cdef void *ufunc__nct_cdf_data[2] +cdef char ufunc__nct_cdf_types[8] +cdef char *ufunc__nct_cdf_doc = ( + "_nct_cdf(x, v, l)\n" + "\n" + "Cumulative distribution function of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_cdf_loops[0] = loop_f_fff__As_fff_f +ufunc__nct_cdf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nct_cdf_types[0] = NPY_FLOAT +ufunc__nct_cdf_types[1] = NPY_FLOAT +ufunc__nct_cdf_types[2] = NPY_FLOAT +ufunc__nct_cdf_types[3] = NPY_FLOAT +ufunc__nct_cdf_types[4] = NPY_DOUBLE +ufunc__nct_cdf_types[5] = NPY_DOUBLE +ufunc__nct_cdf_types[6] = NPY_DOUBLE +ufunc__nct_cdf_types[7] = NPY_DOUBLE +ufunc__nct_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_cdf_float +ufunc__nct_cdf_ptr[2*0+1] = ("_nct_cdf") +ufunc__nct_cdf_ptr[2*1] =
scipy.special._ufuncs_cxx._export_nct_cdf_double +ufunc__nct_cdf_ptr[2*1+1] = ("_nct_cdf") +ufunc__nct_cdf_data[0] = &ufunc__nct_cdf_ptr[2*0] +ufunc__nct_cdf_data[1] = &ufunc__nct_cdf_ptr[2*1] +_nct_cdf = np.PyUFunc_FromFuncAndData(ufunc__nct_cdf_loops, ufunc__nct_cdf_data, ufunc__nct_cdf_types, 2, 3, 1, 0, "_nct_cdf", ufunc__nct_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_isf_loops[2] +cdef void *ufunc__nct_isf_ptr[4] +cdef void *ufunc__nct_isf_data[2] +cdef char ufunc__nct_isf_types[8] +cdef char *ufunc__nct_isf_doc = ( + "_nct_isf(x, v, l)\n" + "\n" + "Inverse survival function of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_isf_loops[0] = loop_f_fff__As_fff_f +ufunc__nct_isf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nct_isf_types[0] = NPY_FLOAT +ufunc__nct_isf_types[1] = NPY_FLOAT +ufunc__nct_isf_types[2] = NPY_FLOAT +ufunc__nct_isf_types[3] = NPY_FLOAT +ufunc__nct_isf_types[4] = NPY_DOUBLE +ufunc__nct_isf_types[5] = NPY_DOUBLE +ufunc__nct_isf_types[6] = NPY_DOUBLE +ufunc__nct_isf_types[7] = NPY_DOUBLE +ufunc__nct_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_isf_float +ufunc__nct_isf_ptr[2*0+1] = ("_nct_isf") +ufunc__nct_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_isf_double +ufunc__nct_isf_ptr[2*1+1] = ("_nct_isf") +ufunc__nct_isf_data[0] = &ufunc__nct_isf_ptr[2*0] +ufunc__nct_isf_data[1] = &ufunc__nct_isf_ptr[2*1] +_nct_isf = np.PyUFunc_FromFuncAndData(ufunc__nct_isf_loops, ufunc__nct_isf_data, ufunc__nct_isf_types, 2, 3, 1, 0, "_nct_isf", ufunc__nct_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_kurtosis_excess_loops[2] +cdef void *ufunc__nct_kurtosis_excess_ptr[4] +cdef void *ufunc__nct_kurtosis_excess_data[2] +cdef char ufunc__nct_kurtosis_excess_types[6] +cdef char *ufunc__nct_kurtosis_excess_doc = ( + "_nct_kurtosis_excess(v, l)\n" + "\n" + "Kurtosis excess of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_kurtosis_excess_loops[0] = loop_f_ff__As_ff_f +ufunc__nct_kurtosis_excess_loops[1] = loop_d_dd__As_dd_d +ufunc__nct_kurtosis_excess_types[0] = NPY_FLOAT +ufunc__nct_kurtosis_excess_types[1] = NPY_FLOAT +ufunc__nct_kurtosis_excess_types[2] = NPY_FLOAT +ufunc__nct_kurtosis_excess_types[3] = NPY_DOUBLE +ufunc__nct_kurtosis_excess_types[4] = NPY_DOUBLE +ufunc__nct_kurtosis_excess_types[5] = NPY_DOUBLE +ufunc__nct_kurtosis_excess_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_kurtosis_excess_float +ufunc__nct_kurtosis_excess_ptr[2*0+1] = ("_nct_kurtosis_excess") +ufunc__nct_kurtosis_excess_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_kurtosis_excess_double +ufunc__nct_kurtosis_excess_ptr[2*1+1] = ("_nct_kurtosis_excess") +ufunc__nct_kurtosis_excess_data[0] = &ufunc__nct_kurtosis_excess_ptr[2*0] +ufunc__nct_kurtosis_excess_data[1] = &ufunc__nct_kurtosis_excess_ptr[2*1] +_nct_kurtosis_excess = np.PyUFunc_FromFuncAndData(ufunc__nct_kurtosis_excess_loops, ufunc__nct_kurtosis_excess_data, ufunc__nct_kurtosis_excess_types, 2, 2, 1, 0, "_nct_kurtosis_excess", ufunc__nct_kurtosis_excess_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_mean_loops[2] +cdef void *ufunc__nct_mean_ptr[4] +cdef void
*ufunc__nct_mean_data[2] +cdef char ufunc__nct_mean_types[6] +cdef char *ufunc__nct_mean_doc = ( + "_nct_mean(v, l)\n" + "\n" + "Mean of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_mean_loops[0] = loop_f_ff__As_ff_f +ufunc__nct_mean_loops[1] = loop_d_dd__As_dd_d +ufunc__nct_mean_types[0] = NPY_FLOAT +ufunc__nct_mean_types[1] = NPY_FLOAT +ufunc__nct_mean_types[2] = NPY_FLOAT +ufunc__nct_mean_types[3] = NPY_DOUBLE +ufunc__nct_mean_types[4] = NPY_DOUBLE +ufunc__nct_mean_types[5] = NPY_DOUBLE +ufunc__nct_mean_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_mean_float +ufunc__nct_mean_ptr[2*0+1] = ("_nct_mean") +ufunc__nct_mean_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_mean_double +ufunc__nct_mean_ptr[2*1+1] = ("_nct_mean") +ufunc__nct_mean_data[0] = &ufunc__nct_mean_ptr[2*0] +ufunc__nct_mean_data[1] = &ufunc__nct_mean_ptr[2*1] +_nct_mean = np.PyUFunc_FromFuncAndData(ufunc__nct_mean_loops, ufunc__nct_mean_data, ufunc__nct_mean_types, 2, 2, 1, 0, "_nct_mean", ufunc__nct_mean_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_ppf_loops[2] +cdef void *ufunc__nct_ppf_ptr[4] +cdef void *ufunc__nct_ppf_data[2] +cdef char ufunc__nct_ppf_types[8] +cdef char *ufunc__nct_ppf_doc = ( + "_nct_ppf(x, v, l)\n" + "\n" + "Percent point function of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_ppf_loops[0] = loop_f_fff__As_fff_f +ufunc__nct_ppf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nct_ppf_types[0] = NPY_FLOAT +ufunc__nct_ppf_types[1] = NPY_FLOAT +ufunc__nct_ppf_types[2] = NPY_FLOAT +ufunc__nct_ppf_types[3] = NPY_FLOAT +ufunc__nct_ppf_types[4] = NPY_DOUBLE +ufunc__nct_ppf_types[5] = NPY_DOUBLE +ufunc__nct_ppf_types[6] = NPY_DOUBLE +ufunc__nct_ppf_types[7] = NPY_DOUBLE +ufunc__nct_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_ppf_float +ufunc__nct_ppf_ptr[2*0+1] = ("_nct_ppf") +ufunc__nct_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_ppf_double +ufunc__nct_ppf_ptr[2*1+1] = ("_nct_ppf") +ufunc__nct_ppf_data[0] = &ufunc__nct_ppf_ptr[2*0] +ufunc__nct_ppf_data[1] = &ufunc__nct_ppf_ptr[2*1] +_nct_ppf = np.PyUFunc_FromFuncAndData(ufunc__nct_ppf_loops, ufunc__nct_ppf_data, ufunc__nct_ppf_types, 2, 3, 1, 0, "_nct_ppf", ufunc__nct_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_sf_loops[2] +cdef void *ufunc__nct_sf_ptr[4] +cdef void *ufunc__nct_sf_data[2] +cdef char ufunc__nct_sf_types[8] +cdef char *ufunc__nct_sf_doc = ( + "_nct_sf(x, v, l)\n" + "\n" + "Survival function of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_sf_loops[0] = loop_f_fff__As_fff_f +ufunc__nct_sf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__nct_sf_types[0] = NPY_FLOAT +ufunc__nct_sf_types[1] = NPY_FLOAT +ufunc__nct_sf_types[2] = NPY_FLOAT +ufunc__nct_sf_types[3] = NPY_FLOAT +ufunc__nct_sf_types[4] = NPY_DOUBLE +ufunc__nct_sf_types[5] = NPY_DOUBLE +ufunc__nct_sf_types[6] = NPY_DOUBLE +ufunc__nct_sf_types[7] = NPY_DOUBLE 
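# Illustrative check (assumes the private scipy.special._ufuncs import path):
# for v > 1 the noncentral t mean has the closed form
# l*sqrt(v/2)*Gamma((v-1)/2)/Gamma(v/2), which _nct_mean above should
# reproduce; gamma is the public scipy.special gamma function.
import numpy as np
from scipy.special import _ufuncs, gamma

v, l = 5.0, 1.5
closed_form = l * np.sqrt(v / 2) * gamma((v - 1) / 2) / gamma(v / 2)
print(_ufuncs._nct_mean(v, l), closed_form)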
+ufunc__nct_sf_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_sf_float +ufunc__nct_sf_ptr[2*0+1] = ("_nct_sf") +ufunc__nct_sf_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_sf_double +ufunc__nct_sf_ptr[2*1+1] = ("_nct_sf") +ufunc__nct_sf_data[0] = &ufunc__nct_sf_ptr[2*0] +ufunc__nct_sf_data[1] = &ufunc__nct_sf_ptr[2*1] +_nct_sf = np.PyUFunc_FromFuncAndData(ufunc__nct_sf_loops, ufunc__nct_sf_data, ufunc__nct_sf_types, 2, 3, 1, 0, "_nct_sf", ufunc__nct_sf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_skewness_loops[2] +cdef void *ufunc__nct_skewness_ptr[4] +cdef void *ufunc__nct_skewness_data[2] +cdef char ufunc__nct_skewness_types[6] +cdef char *ufunc__nct_skewness_doc = ( + "_nct_skewness(v, l)\n" + "\n" + "Skewness of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_skewness_loops[0] = loop_f_ff__As_ff_f +ufunc__nct_skewness_loops[1] = loop_d_dd__As_dd_d +ufunc__nct_skewness_types[0] = NPY_FLOAT +ufunc__nct_skewness_types[1] = NPY_FLOAT +ufunc__nct_skewness_types[2] = NPY_FLOAT +ufunc__nct_skewness_types[3] = NPY_DOUBLE +ufunc__nct_skewness_types[4] = NPY_DOUBLE +ufunc__nct_skewness_types[5] = NPY_DOUBLE +ufunc__nct_skewness_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_skewness_float +ufunc__nct_skewness_ptr[2*0+1] = ("_nct_skewness") +ufunc__nct_skewness_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_skewness_double +ufunc__nct_skewness_ptr[2*1+1] = ("_nct_skewness") +ufunc__nct_skewness_data[0] = &ufunc__nct_skewness_ptr[2*0] +ufunc__nct_skewness_data[1] = &ufunc__nct_skewness_ptr[2*1] +_nct_skewness = np.PyUFunc_FromFuncAndData(ufunc__nct_skewness_loops, ufunc__nct_skewness_data, ufunc__nct_skewness_types, 2, 2, 1, 0, "_nct_skewness", ufunc__nct_skewness_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__nct_variance_loops[2] +cdef void *ufunc__nct_variance_ptr[4] +cdef void *ufunc__nct_variance_data[2] +cdef char ufunc__nct_variance_types[6] +cdef char *ufunc__nct_variance_doc = ( + "_nct_variance(v, l)\n" + "\n" + "Variance of noncentral t-distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Positive, real-valued parameters\n" + "l : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__nct_variance_loops[0] = loop_f_ff__As_ff_f +ufunc__nct_variance_loops[1] = loop_d_dd__As_dd_d +ufunc__nct_variance_types[0] = NPY_FLOAT +ufunc__nct_variance_types[1] = NPY_FLOAT +ufunc__nct_variance_types[2] = NPY_FLOAT +ufunc__nct_variance_types[3] = NPY_DOUBLE +ufunc__nct_variance_types[4] = NPY_DOUBLE +ufunc__nct_variance_types[5] = NPY_DOUBLE +ufunc__nct_variance_ptr[2*0] = scipy.special._ufuncs_cxx._export_nct_variance_float +ufunc__nct_variance_ptr[2*0+1] = ("_nct_variance") +ufunc__nct_variance_ptr[2*1] = scipy.special._ufuncs_cxx._export_nct_variance_double +ufunc__nct_variance_ptr[2*1+1] = ("_nct_variance") +ufunc__nct_variance_data[0] = &ufunc__nct_variance_ptr[2*0] +ufunc__nct_variance_data[1] = &ufunc__nct_variance_ptr[2*1] +_nct_variance = np.PyUFunc_FromFuncAndData(ufunc__nct_variance_loops, ufunc__nct_variance_data, ufunc__nct_variance_types, 2, 2, 1, 0, "_nct_variance", ufunc__nct_variance_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncx2_cdf_loops[2] +cdef void *ufunc__ncx2_cdf_ptr[4] +cdef void *ufunc__ncx2_cdf_data[2] +cdef char ufunc__ncx2_cdf_types[8] +cdef char 
*ufunc__ncx2_cdf_doc = ( + "_ncx2_cdf(x, k, l)\n" + "\n" + "Cumulative distribution function of Non-central chi-squared distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "k, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncx2_cdf_loops[0] = loop_f_fff__As_fff_f +ufunc__ncx2_cdf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncx2_cdf_types[0] = NPY_FLOAT +ufunc__ncx2_cdf_types[1] = NPY_FLOAT +ufunc__ncx2_cdf_types[2] = NPY_FLOAT +ufunc__ncx2_cdf_types[3] = NPY_FLOAT +ufunc__ncx2_cdf_types[4] = NPY_DOUBLE +ufunc__ncx2_cdf_types[5] = NPY_DOUBLE +ufunc__ncx2_cdf_types[6] = NPY_DOUBLE +ufunc__ncx2_cdf_types[7] = NPY_DOUBLE +ufunc__ncx2_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncx2_cdf_float +ufunc__ncx2_cdf_ptr[2*0+1] = ("_ncx2_cdf") +ufunc__ncx2_cdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncx2_cdf_double +ufunc__ncx2_cdf_ptr[2*1+1] = ("_ncx2_cdf") +ufunc__ncx2_cdf_data[0] = &ufunc__ncx2_cdf_ptr[2*0] +ufunc__ncx2_cdf_data[1] = &ufunc__ncx2_cdf_ptr[2*1] +_ncx2_cdf = np.PyUFunc_FromFuncAndData(ufunc__ncx2_cdf_loops, ufunc__ncx2_cdf_data, ufunc__ncx2_cdf_types, 2, 3, 1, 0, "_ncx2_cdf", ufunc__ncx2_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncx2_isf_loops[2] +cdef void *ufunc__ncx2_isf_ptr[4] +cdef void *ufunc__ncx2_isf_data[2] +cdef char ufunc__ncx2_isf_types[8] +cdef char *ufunc__ncx2_isf_doc = ( + "_ncx2_isf(x, k, l)\n" + "\n" + "Inverse survival function of Non-central chi-squared distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "k, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncx2_isf_loops[0] = loop_f_fff__As_fff_f +ufunc__ncx2_isf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncx2_isf_types[0] = NPY_FLOAT +ufunc__ncx2_isf_types[1] = NPY_FLOAT +ufunc__ncx2_isf_types[2] = NPY_FLOAT +ufunc__ncx2_isf_types[3] = NPY_FLOAT +ufunc__ncx2_isf_types[4] = NPY_DOUBLE +ufunc__ncx2_isf_types[5] = NPY_DOUBLE +ufunc__ncx2_isf_types[6] = NPY_DOUBLE +ufunc__ncx2_isf_types[7] = NPY_DOUBLE +ufunc__ncx2_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncx2_isf_float +ufunc__ncx2_isf_ptr[2*0+1] = ("_ncx2_isf") +ufunc__ncx2_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncx2_isf_double +ufunc__ncx2_isf_ptr[2*1+1] = ("_ncx2_isf") +ufunc__ncx2_isf_data[0] = &ufunc__ncx2_isf_ptr[2*0] +ufunc__ncx2_isf_data[1] = &ufunc__ncx2_isf_ptr[2*1] +_ncx2_isf = np.PyUFunc_FromFuncAndData(ufunc__ncx2_isf_loops, ufunc__ncx2_isf_data, ufunc__ncx2_isf_types, 2, 3, 1, 0, "_ncx2_isf", ufunc__ncx2_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncx2_pdf_loops[2] +cdef void *ufunc__ncx2_pdf_ptr[4] +cdef void *ufunc__ncx2_pdf_data[2] +cdef char ufunc__ncx2_pdf_types[8] +cdef char *ufunc__ncx2_pdf_doc = ( + "_ncx2_pdf(x, k, l)\n" + "\n" + "Probability density function of Non-central chi-squared distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "k, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncx2_pdf_loops[0] = loop_f_fff__As_fff_f +ufunc__ncx2_pdf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncx2_pdf_types[0] = NPY_FLOAT +ufunc__ncx2_pdf_types[1] = NPY_FLOAT +ufunc__ncx2_pdf_types[2] = NPY_FLOAT +ufunc__ncx2_pdf_types[3] = NPY_FLOAT +ufunc__ncx2_pdf_types[4] = NPY_DOUBLE +ufunc__ncx2_pdf_types[5] = NPY_DOUBLE 
+ufunc__ncx2_pdf_types[6] = NPY_DOUBLE +ufunc__ncx2_pdf_types[7] = NPY_DOUBLE +ufunc__ncx2_pdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncx2_pdf_float +ufunc__ncx2_pdf_ptr[2*0+1] = ("_ncx2_pdf") +ufunc__ncx2_pdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncx2_pdf_double +ufunc__ncx2_pdf_ptr[2*1+1] = ("_ncx2_pdf") +ufunc__ncx2_pdf_data[0] = &ufunc__ncx2_pdf_ptr[2*0] +ufunc__ncx2_pdf_data[1] = &ufunc__ncx2_pdf_ptr[2*1] +_ncx2_pdf = np.PyUFunc_FromFuncAndData(ufunc__ncx2_pdf_loops, ufunc__ncx2_pdf_data, ufunc__ncx2_pdf_types, 2, 3, 1, 0, "_ncx2_pdf", ufunc__ncx2_pdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncx2_ppf_loops[2] +cdef void *ufunc__ncx2_ppf_ptr[4] +cdef void *ufunc__ncx2_ppf_data[2] +cdef char ufunc__ncx2_ppf_types[8] +cdef char *ufunc__ncx2_ppf_doc = ( + "_ncx2_ppf(x, k, l)\n" + "\n" + "Percent point function of Non-central chi-squared distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "k, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncx2_ppf_loops[0] = loop_f_fff__As_fff_f +ufunc__ncx2_ppf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncx2_ppf_types[0] = NPY_FLOAT +ufunc__ncx2_ppf_types[1] = NPY_FLOAT +ufunc__ncx2_ppf_types[2] = NPY_FLOAT +ufunc__ncx2_ppf_types[3] = NPY_FLOAT +ufunc__ncx2_ppf_types[4] = NPY_DOUBLE +ufunc__ncx2_ppf_types[5] = NPY_DOUBLE +ufunc__ncx2_ppf_types[6] = NPY_DOUBLE +ufunc__ncx2_ppf_types[7] = NPY_DOUBLE +ufunc__ncx2_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncx2_ppf_float +ufunc__ncx2_ppf_ptr[2*0+1] = ("_ncx2_ppf") +ufunc__ncx2_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncx2_ppf_double +ufunc__ncx2_ppf_ptr[2*1+1] = ("_ncx2_ppf") +ufunc__ncx2_ppf_data[0] = &ufunc__ncx2_ppf_ptr[2*0] +ufunc__ncx2_ppf_data[1] = &ufunc__ncx2_ppf_ptr[2*1] +_ncx2_ppf = np.PyUFunc_FromFuncAndData(ufunc__ncx2_ppf_loops, ufunc__ncx2_ppf_data, ufunc__ncx2_ppf_types, 2, 3, 1, 0, "_ncx2_ppf", ufunc__ncx2_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ncx2_sf_loops[2] +cdef void *ufunc__ncx2_sf_ptr[4] +cdef void *ufunc__ncx2_sf_data[2] +cdef char ufunc__ncx2_sf_types[8] +cdef char *ufunc__ncx2_sf_doc = ( + "_ncx2_sf(x, k, l)\n" + "\n" + "Survival function of Non-central chi-squared distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Positive real-valued\n" + "k, l : array_like\n" + " Positive, real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__ncx2_sf_loops[0] = loop_f_fff__As_fff_f +ufunc__ncx2_sf_loops[1] = loop_d_ddd__As_ddd_d +ufunc__ncx2_sf_types[0] = NPY_FLOAT +ufunc__ncx2_sf_types[1] = NPY_FLOAT +ufunc__ncx2_sf_types[2] = NPY_FLOAT +ufunc__ncx2_sf_types[3] = NPY_FLOAT +ufunc__ncx2_sf_types[4] = NPY_DOUBLE +ufunc__ncx2_sf_types[5] = NPY_DOUBLE +ufunc__ncx2_sf_types[6] = NPY_DOUBLE +ufunc__ncx2_sf_types[7] = NPY_DOUBLE +ufunc__ncx2_sf_ptr[2*0] = scipy.special._ufuncs_cxx._export_ncx2_sf_float +ufunc__ncx2_sf_ptr[2*0+1] = ("_ncx2_sf") +ufunc__ncx2_sf_ptr[2*1] = scipy.special._ufuncs_cxx._export_ncx2_sf_double +ufunc__ncx2_sf_ptr[2*1+1] = ("_ncx2_sf") +ufunc__ncx2_sf_data[0] = &ufunc__ncx2_sf_ptr[2*0] +ufunc__ncx2_sf_data[1] = &ufunc__ncx2_sf_ptr[2*1] +_ncx2_sf = np.PyUFunc_FromFuncAndData(ufunc__ncx2_sf_loops, ufunc__ncx2_sf_data, ufunc__ncx2_sf_types, 2, 3, 1, 0, "_ncx2_sf", ufunc__ncx2_sf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__riemann_zeta_loops[2] +cdef void *ufunc__riemann_zeta_ptr[4] +cdef void *ufunc__riemann_zeta_data[2] +cdef char 
ufunc__riemann_zeta_types[4] +cdef char *ufunc__riemann_zeta_doc = ( + "Internal function, use `zeta` instead.") +ufunc__riemann_zeta_loops[0] = loop_d_d__As_f_f +ufunc__riemann_zeta_loops[1] = loop_d_d__As_d_d +ufunc__riemann_zeta_types[0] = NPY_FLOAT +ufunc__riemann_zeta_types[1] = NPY_FLOAT +ufunc__riemann_zeta_types[2] = NPY_DOUBLE +ufunc__riemann_zeta_types[3] = NPY_DOUBLE +ufunc__riemann_zeta_ptr[2*0] = _func_cephes_riemann_zeta +ufunc__riemann_zeta_ptr[2*0+1] = ("_riemann_zeta") +ufunc__riemann_zeta_ptr[2*1] = _func_cephes_riemann_zeta +ufunc__riemann_zeta_ptr[2*1+1] = ("_riemann_zeta") +ufunc__riemann_zeta_data[0] = &ufunc__riemann_zeta_ptr[2*0] +ufunc__riemann_zeta_data[1] = &ufunc__riemann_zeta_ptr[2*1] +_riemann_zeta = np.PyUFunc_FromFuncAndData(ufunc__riemann_zeta_loops, ufunc__riemann_zeta_data, ufunc__riemann_zeta_types, 2, 1, 1, 0, "_riemann_zeta", ufunc__riemann_zeta_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__sf_error_test_function_loops[1] +cdef void *ufunc__sf_error_test_function_ptr[2] +cdef void *ufunc__sf_error_test_function_data[1] +cdef char ufunc__sf_error_test_function_types[2] +cdef char *ufunc__sf_error_test_function_doc = ( + "Private function; do not use.") +ufunc__sf_error_test_function_loops[0] = loop_i_i__As_l_l +ufunc__sf_error_test_function_types[0] = NPY_LONG +ufunc__sf_error_test_function_types[1] = NPY_LONG +ufunc__sf_error_test_function_ptr[2*0] = _func__sf_error_test_function +ufunc__sf_error_test_function_ptr[2*0+1] = ("_sf_error_test_function") +ufunc__sf_error_test_function_data[0] = &ufunc__sf_error_test_function_ptr[2*0] +_sf_error_test_function = np.PyUFunc_FromFuncAndData(ufunc__sf_error_test_function_loops, ufunc__sf_error_test_function_data, ufunc__sf_error_test_function_types, 1, 1, 1, 0, "_sf_error_test_function", ufunc__sf_error_test_function_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__skewnorm_cdf_loops[2] +cdef void *ufunc__skewnorm_cdf_ptr[4] +cdef void *ufunc__skewnorm_cdf_data[2] +cdef char ufunc__skewnorm_cdf_types[10] +cdef char *ufunc__skewnorm_cdf_doc = ( + "_skewnorm_cdf(x, l, sc, sh)\n" + "\n" + "Cumulative distribution function of skewnorm distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "l : array_like\n" + " Real-valued parameters\n" + "sc : array_like\n" + " Positive, real-valued parameters\n" + "sh : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__skewnorm_cdf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__skewnorm_cdf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__skewnorm_cdf_types[0] = NPY_FLOAT +ufunc__skewnorm_cdf_types[1] = NPY_FLOAT +ufunc__skewnorm_cdf_types[2] = NPY_FLOAT +ufunc__skewnorm_cdf_types[3] = NPY_FLOAT +ufunc__skewnorm_cdf_types[4] = NPY_FLOAT +ufunc__skewnorm_cdf_types[5] = NPY_DOUBLE +ufunc__skewnorm_cdf_types[6] = NPY_DOUBLE +ufunc__skewnorm_cdf_types[7] = NPY_DOUBLE +ufunc__skewnorm_cdf_types[8] = NPY_DOUBLE +ufunc__skewnorm_cdf_types[9] = NPY_DOUBLE +ufunc__skewnorm_cdf_ptr[2*0] = scipy.special._ufuncs_cxx._export_skewnorm_cdf_float +ufunc__skewnorm_cdf_ptr[2*0+1] = ("_skewnorm_cdf") +ufunc__skewnorm_cdf_ptr[2*1] = scipy.special._ufuncs_cxx._export_skewnorm_cdf_double +ufunc__skewnorm_cdf_ptr[2*1+1] = ("_skewnorm_cdf") +ufunc__skewnorm_cdf_data[0] = &ufunc__skewnorm_cdf_ptr[2*0] +ufunc__skewnorm_cdf_data[1] = &ufunc__skewnorm_cdf_ptr[2*1] +_skewnorm_cdf = np.PyUFunc_FromFuncAndData(ufunc__skewnorm_cdf_loops, ufunc__skewnorm_cdf_data, ufunc__skewnorm_cdf_types, 2, 4, 1, 0, "_skewnorm_cdf", 
ufunc__skewnorm_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__skewnorm_isf_loops[2] +cdef void *ufunc__skewnorm_isf_ptr[4] +cdef void *ufunc__skewnorm_isf_data[2] +cdef char ufunc__skewnorm_isf_types[10] +cdef char *ufunc__skewnorm_isf_doc = ( + "_skewnorm_isf(x, l, sc, sh)\n" + "\n" + "Inverse survival function of skewnorm distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "l : array_like\n" + " Real-valued parameters\n" + "sc : array_like\n" + " Positive, real-valued parameters\n" + "sh : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__skewnorm_isf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__skewnorm_isf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__skewnorm_isf_types[0] = NPY_FLOAT +ufunc__skewnorm_isf_types[1] = NPY_FLOAT +ufunc__skewnorm_isf_types[2] = NPY_FLOAT +ufunc__skewnorm_isf_types[3] = NPY_FLOAT +ufunc__skewnorm_isf_types[4] = NPY_FLOAT +ufunc__skewnorm_isf_types[5] = NPY_DOUBLE +ufunc__skewnorm_isf_types[6] = NPY_DOUBLE +ufunc__skewnorm_isf_types[7] = NPY_DOUBLE +ufunc__skewnorm_isf_types[8] = NPY_DOUBLE +ufunc__skewnorm_isf_types[9] = NPY_DOUBLE +ufunc__skewnorm_isf_ptr[2*0] = scipy.special._ufuncs_cxx._export_skewnorm_isf_float +ufunc__skewnorm_isf_ptr[2*0+1] = ("_skewnorm_isf") +ufunc__skewnorm_isf_ptr[2*1] = scipy.special._ufuncs_cxx._export_skewnorm_isf_double +ufunc__skewnorm_isf_ptr[2*1+1] = ("_skewnorm_isf") +ufunc__skewnorm_isf_data[0] = &ufunc__skewnorm_isf_ptr[2*0] +ufunc__skewnorm_isf_data[1] = &ufunc__skewnorm_isf_ptr[2*1] +_skewnorm_isf = np.PyUFunc_FromFuncAndData(ufunc__skewnorm_isf_loops, ufunc__skewnorm_isf_data, ufunc__skewnorm_isf_types, 2, 4, 1, 0, "_skewnorm_isf", ufunc__skewnorm_isf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__skewnorm_ppf_loops[2] +cdef void *ufunc__skewnorm_ppf_ptr[4] +cdef void *ufunc__skewnorm_ppf_data[2] +cdef char ufunc__skewnorm_ppf_types[10] +cdef char *ufunc__skewnorm_ppf_doc = ( + "_skewnorm_ppf(x, l, sc, sh)\n" + "\n" + "Percent point function of skewnorm distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real-valued\n" + "l : array_like\n" + " Real-valued parameters\n" + "sc : array_like\n" + " Positive, real-valued parameters\n" + "sh : array_like\n" + " Real-valued parameters\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray") +ufunc__skewnorm_ppf_loops[0] = loop_f_ffff__As_ffff_f +ufunc__skewnorm_ppf_loops[1] = loop_d_dddd__As_dddd_d +ufunc__skewnorm_ppf_types[0] = NPY_FLOAT +ufunc__skewnorm_ppf_types[1] = NPY_FLOAT +ufunc__skewnorm_ppf_types[2] = NPY_FLOAT +ufunc__skewnorm_ppf_types[3] = NPY_FLOAT +ufunc__skewnorm_ppf_types[4] = NPY_FLOAT +ufunc__skewnorm_ppf_types[5] = NPY_DOUBLE +ufunc__skewnorm_ppf_types[6] = NPY_DOUBLE +ufunc__skewnorm_ppf_types[7] = NPY_DOUBLE +ufunc__skewnorm_ppf_types[8] = NPY_DOUBLE +ufunc__skewnorm_ppf_types[9] = NPY_DOUBLE +ufunc__skewnorm_ppf_ptr[2*0] = scipy.special._ufuncs_cxx._export_skewnorm_ppf_float +ufunc__skewnorm_ppf_ptr[2*0+1] = ("_skewnorm_ppf") +ufunc__skewnorm_ppf_ptr[2*1] = scipy.special._ufuncs_cxx._export_skewnorm_ppf_double +ufunc__skewnorm_ppf_ptr[2*1+1] = ("_skewnorm_ppf") +ufunc__skewnorm_ppf_data[0] = &ufunc__skewnorm_ppf_ptr[2*0] +ufunc__skewnorm_ppf_data[1] = &ufunc__skewnorm_ppf_ptr[2*1] +_skewnorm_ppf = np.PyUFunc_FromFuncAndData(ufunc__skewnorm_ppf_loops, ufunc__skewnorm_ppf_data, ufunc__skewnorm_ppf_types, 2, 4, 1, 0, "_skewnorm_ppf", ufunc__skewnorm_ppf_doc, 0) + +cdef np.PyUFuncGenericFunction 
ufunc__smirnovc_loops[3] +cdef void *ufunc__smirnovc_ptr[6] +cdef void *ufunc__smirnovc_data[3] +cdef char ufunc__smirnovc_types[9] +cdef char *ufunc__smirnovc_doc = ( + "_smirnovc(n, d)\n" + " Internal function, do not use.") +ufunc__smirnovc_loops[0] = loop_d_pd__As_pd_d +ufunc__smirnovc_loops[1] = loop_d_dd__As_ff_f +ufunc__smirnovc_loops[2] = loop_d_dd__As_dd_d +ufunc__smirnovc_types[0] = NPY_INTP +ufunc__smirnovc_types[1] = NPY_DOUBLE +ufunc__smirnovc_types[2] = NPY_DOUBLE +ufunc__smirnovc_types[3] = NPY_FLOAT +ufunc__smirnovc_types[4] = NPY_FLOAT +ufunc__smirnovc_types[5] = NPY_FLOAT +ufunc__smirnovc_types[6] = NPY_DOUBLE +ufunc__smirnovc_types[7] = NPY_DOUBLE +ufunc__smirnovc_types[8] = NPY_DOUBLE +ufunc__smirnovc_ptr[2*0] = _func_cephes_smirnovc_wrap +ufunc__smirnovc_ptr[2*0+1] = ("_smirnovc") +ufunc__smirnovc_ptr[2*1] = _func_smirnovc_unsafe +ufunc__smirnovc_ptr[2*1+1] = ("_smirnovc") +ufunc__smirnovc_ptr[2*2] = _func_smirnovc_unsafe +ufunc__smirnovc_ptr[2*2+1] = ("_smirnovc") +ufunc__smirnovc_data[0] = &ufunc__smirnovc_ptr[2*0] +ufunc__smirnovc_data[1] = &ufunc__smirnovc_ptr[2*1] +ufunc__smirnovc_data[2] = &ufunc__smirnovc_ptr[2*2] +_smirnovc = np.PyUFunc_FromFuncAndData(ufunc__smirnovc_loops, ufunc__smirnovc_data, ufunc__smirnovc_types, 3, 2, 1, 0, "_smirnovc", ufunc__smirnovc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__smirnovci_loops[3] +cdef void *ufunc__smirnovci_ptr[6] +cdef void *ufunc__smirnovci_data[3] +cdef char ufunc__smirnovci_types[9] +cdef char *ufunc__smirnovci_doc = ( + "Internal function, do not use.") +ufunc__smirnovci_loops[0] = loop_d_pd__As_pd_d +ufunc__smirnovci_loops[1] = loop_d_dd__As_ff_f +ufunc__smirnovci_loops[2] = loop_d_dd__As_dd_d +ufunc__smirnovci_types[0] = NPY_INTP +ufunc__smirnovci_types[1] = NPY_DOUBLE +ufunc__smirnovci_types[2] = NPY_DOUBLE +ufunc__smirnovci_types[3] = NPY_FLOAT +ufunc__smirnovci_types[4] = NPY_FLOAT +ufunc__smirnovci_types[5] = NPY_FLOAT +ufunc__smirnovci_types[6] = NPY_DOUBLE +ufunc__smirnovci_types[7] = NPY_DOUBLE +ufunc__smirnovci_types[8] = NPY_DOUBLE +ufunc__smirnovci_ptr[2*0] = _func_cephes_smirnovci_wrap +ufunc__smirnovci_ptr[2*0+1] = ("_smirnovci") +ufunc__smirnovci_ptr[2*1] = _func_smirnovci_unsafe +ufunc__smirnovci_ptr[2*1+1] = ("_smirnovci") +ufunc__smirnovci_ptr[2*2] = _func_smirnovci_unsafe +ufunc__smirnovci_ptr[2*2+1] = ("_smirnovci") +ufunc__smirnovci_data[0] = &ufunc__smirnovci_ptr[2*0] +ufunc__smirnovci_data[1] = &ufunc__smirnovci_ptr[2*1] +ufunc__smirnovci_data[2] = &ufunc__smirnovci_ptr[2*2] +_smirnovci = np.PyUFunc_FromFuncAndData(ufunc__smirnovci_loops, ufunc__smirnovci_data, ufunc__smirnovci_types, 3, 2, 1, 0, "_smirnovci", ufunc__smirnovci_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__smirnovp_loops[3] +cdef void *ufunc__smirnovp_ptr[6] +cdef void *ufunc__smirnovp_data[3] +cdef char ufunc__smirnovp_types[9] +cdef char *ufunc__smirnovp_doc = ( + "_smirnovp(n, p)\n" + " Internal function, do not use.") +ufunc__smirnovp_loops[0] = loop_d_pd__As_pd_d +ufunc__smirnovp_loops[1] = loop_d_dd__As_ff_f +ufunc__smirnovp_loops[2] = loop_d_dd__As_dd_d +ufunc__smirnovp_types[0] = NPY_INTP +ufunc__smirnovp_types[1] = NPY_DOUBLE +ufunc__smirnovp_types[2] = NPY_DOUBLE +ufunc__smirnovp_types[3] = NPY_FLOAT +ufunc__smirnovp_types[4] = NPY_FLOAT +ufunc__smirnovp_types[5] = NPY_FLOAT +ufunc__smirnovp_types[6] = NPY_DOUBLE +ufunc__smirnovp_types[7] = NPY_DOUBLE +ufunc__smirnovp_types[8] = NPY_DOUBLE +ufunc__smirnovp_ptr[2*0] = _func_cephes_smirnovp_wrap +ufunc__smirnovp_ptr[2*0+1] = ("_smirnovp") 
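+# Hedged note on the Smirnov helpers registered above: _smirnovc(n, d)
+# appears to be the complement 1 - smirnov(n, d), _smirnovci its inverse,
+# and _smirnovp the derivative with respect to d (an inference from the
+# names; the docstrings only say "Internal function, do not use").  The
+# same quantities are reachable through the public API:
+#
+# >>> from scipy.special import smirnov, smirnovi
+# >>> p = smirnov(5, 0.4)            # one-sided Kolmogorov-Smirnov sf
+# >>> round(float(smirnovi(5, p)), 12)
+# 0.4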
+ufunc__smirnovp_ptr[2*1] = _func_smirnovp_unsafe +ufunc__smirnovp_ptr[2*1+1] = ("_smirnovp") +ufunc__smirnovp_ptr[2*2] = _func_smirnovp_unsafe +ufunc__smirnovp_ptr[2*2+1] = ("_smirnovp") +ufunc__smirnovp_data[0] = &ufunc__smirnovp_ptr[2*0] +ufunc__smirnovp_data[1] = &ufunc__smirnovp_ptr[2*1] +ufunc__smirnovp_data[2] = &ufunc__smirnovp_ptr[2*2] +_smirnovp = np.PyUFunc_FromFuncAndData(ufunc__smirnovp_loops, ufunc__smirnovp_data, ufunc__smirnovp_types, 3, 2, 1, 0, "_smirnovp", ufunc__smirnovp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__stirling2_inexact_loops[2] +cdef void *ufunc__stirling2_inexact_ptr[4] +cdef void *ufunc__stirling2_inexact_data[2] +cdef char ufunc__stirling2_inexact_types[6] +cdef char *ufunc__stirling2_inexact_doc = ( + "Internal function, do not use.") +ufunc__stirling2_inexact_loops[0] = loop_d_dd__As_ff_f +ufunc__stirling2_inexact_loops[1] = loop_d_dd__As_dd_d +ufunc__stirling2_inexact_types[0] = NPY_FLOAT +ufunc__stirling2_inexact_types[1] = NPY_FLOAT +ufunc__stirling2_inexact_types[2] = NPY_FLOAT +ufunc__stirling2_inexact_types[3] = NPY_DOUBLE +ufunc__stirling2_inexact_types[4] = NPY_DOUBLE +ufunc__stirling2_inexact_types[5] = NPY_DOUBLE +ufunc__stirling2_inexact_ptr[2*0] = scipy.special._ufuncs_cxx._export__stirling2_inexact +ufunc__stirling2_inexact_ptr[2*0+1] = ("_stirling2_inexact") +ufunc__stirling2_inexact_ptr[2*1] = scipy.special._ufuncs_cxx._export__stirling2_inexact +ufunc__stirling2_inexact_ptr[2*1+1] = ("_stirling2_inexact") +ufunc__stirling2_inexact_data[0] = &ufunc__stirling2_inexact_ptr[2*0] +ufunc__stirling2_inexact_data[1] = &ufunc__stirling2_inexact_ptr[2*1] +_stirling2_inexact = np.PyUFunc_FromFuncAndData(ufunc__stirling2_inexact_loops, ufunc__stirling2_inexact_data, ufunc__stirling2_inexact_types, 2, 2, 1, 0, "_stirling2_inexact", ufunc__stirling2_inexact_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__struve_asymp_large_z_loops[1] +cdef void *ufunc__struve_asymp_large_z_ptr[2] +cdef void *ufunc__struve_asymp_large_z_data[1] +cdef char ufunc__struve_asymp_large_z_types[5] +cdef char *ufunc__struve_asymp_large_z_doc = ( + "_struve_asymp_large_z(v, z, is_h)\n" + "\n" + "Internal function for testing `struve` & `modstruve`\n" + "\n" + "Evaluates using asymptotic expansion\n" + "\n" + "Returns\n" + "-------\n" + "v, err") +ufunc__struve_asymp_large_z_loops[0] = loop_d_ddp_d_As_ddp_dd +ufunc__struve_asymp_large_z_types[0] = NPY_DOUBLE +ufunc__struve_asymp_large_z_types[1] = NPY_DOUBLE +ufunc__struve_asymp_large_z_types[2] = NPY_INTP +ufunc__struve_asymp_large_z_types[3] = NPY_DOUBLE +ufunc__struve_asymp_large_z_types[4] = NPY_DOUBLE +ufunc__struve_asymp_large_z_ptr[2*0] = _func_cephes__struve_asymp_large_z +ufunc__struve_asymp_large_z_ptr[2*0+1] = ("_struve_asymp_large_z") +ufunc__struve_asymp_large_z_data[0] = &ufunc__struve_asymp_large_z_ptr[2*0] +_struve_asymp_large_z = np.PyUFunc_FromFuncAndData(ufunc__struve_asymp_large_z_loops, ufunc__struve_asymp_large_z_data, ufunc__struve_asymp_large_z_types, 1, 3, 2, 0, "_struve_asymp_large_z", ufunc__struve_asymp_large_z_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__struve_bessel_series_loops[1] +cdef void *ufunc__struve_bessel_series_ptr[2] +cdef void *ufunc__struve_bessel_series_data[1] +cdef char ufunc__struve_bessel_series_types[5] +cdef char *ufunc__struve_bessel_series_doc = ( + "_struve_bessel_series(v, z, is_h)\n" + "\n" + "Internal function for testing `struve` & `modstruve`\n" + "\n" + "Evaluates using Bessel function series\n" + "\n" + "Returns\n" + "-------\n" + "v, err") 
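+# Hedged sketch: the three _struve_* test ufuncs each take (v, z, is_h) and
+# return two outputs, the function value and an error estimate, which lets
+# the struve/modstruve test suite cross-check the asymptotic, Bessel-series,
+# and power-series evaluations against one another.  Exercising one of them
+# (private API; the argument values are illustrative only):
+#
+# >>> from scipy.special import _ufuncs
+# >>> val, err = _ufuncs._struve_power_series(1.0, 3.0, 1)  # is_h=1: Struve H
+# >>> # val approximates H_1(3); err estimates the truncation error.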
+ufunc__struve_bessel_series_loops[0] = loop_d_ddp_d_As_ddp_dd +ufunc__struve_bessel_series_types[0] = NPY_DOUBLE +ufunc__struve_bessel_series_types[1] = NPY_DOUBLE +ufunc__struve_bessel_series_types[2] = NPY_INTP +ufunc__struve_bessel_series_types[3] = NPY_DOUBLE +ufunc__struve_bessel_series_types[4] = NPY_DOUBLE +ufunc__struve_bessel_series_ptr[2*0] = _func_cephes__struve_bessel_series +ufunc__struve_bessel_series_ptr[2*0+1] = ("_struve_bessel_series") +ufunc__struve_bessel_series_data[0] = &ufunc__struve_bessel_series_ptr[2*0] +_struve_bessel_series = np.PyUFunc_FromFuncAndData(ufunc__struve_bessel_series_loops, ufunc__struve_bessel_series_data, ufunc__struve_bessel_series_types, 1, 3, 2, 0, "_struve_bessel_series", ufunc__struve_bessel_series_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__struve_power_series_loops[1] +cdef void *ufunc__struve_power_series_ptr[2] +cdef void *ufunc__struve_power_series_data[1] +cdef char ufunc__struve_power_series_types[5] +cdef char *ufunc__struve_power_series_doc = ( + "_struve_power_series(v, z, is_h)\n" + "\n" + "Internal function for testing `struve` & `modstruve`\n" + "\n" + "Evaluates using power series\n" + "\n" + "Returns\n" + "-------\n" + "v, err") +ufunc__struve_power_series_loops[0] = loop_d_ddp_d_As_ddp_dd +ufunc__struve_power_series_types[0] = NPY_DOUBLE +ufunc__struve_power_series_types[1] = NPY_DOUBLE +ufunc__struve_power_series_types[2] = NPY_INTP +ufunc__struve_power_series_types[3] = NPY_DOUBLE +ufunc__struve_power_series_types[4] = NPY_DOUBLE +ufunc__struve_power_series_ptr[2*0] = _func_cephes__struve_power_series +ufunc__struve_power_series_ptr[2*0+1] = ("_struve_power_series") +ufunc__struve_power_series_data[0] = &ufunc__struve_power_series_ptr[2*0] +_struve_power_series = np.PyUFunc_FromFuncAndData(ufunc__struve_power_series_loops, ufunc__struve_power_series_data, ufunc__struve_power_series_types, 1, 3, 2, 0, "_struve_power_series", ufunc__struve_power_series_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_agm_loops[2] +cdef void *ufunc_agm_ptr[4] +cdef void *ufunc_agm_data[2] +cdef char ufunc_agm_types[6] +cdef char *ufunc_agm_doc = ( + "agm(a, b, out=None)\n" + "\n" + "Compute the arithmetic-geometric mean of `a` and `b`.\n" + "\n" + "Start with a_0 = a and b_0 = b and iteratively compute::\n" + "\n" + " a_{n+1} = (a_n + b_n)/2\n" + " b_{n+1} = sqrt(a_n*b_n)\n" + "\n" + "a_n and b_n converge to the same limit as n increases; their common\n" + "limit is agm(a, b).\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real values only. If the values are both negative, the result\n" + " is negative. If one value is negative and the other is positive,\n" + " `nan` is returned.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The arithmetic-geometric mean of `a` and `b`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import agm\n" + ">>> a, b = 24.0, 6.0\n" + ">>> agm(a, b)\n" + "13.458171481725614\n" + "\n" + "Compare that result to the iteration:\n" + "\n" + ">>> while a != b:\n" + "... a, b = (a + b)/2, np.sqrt(a*b)\n" + "... 
print(\"a = %19.16f b=%19.16f\" % (a, b))\n" + "...\n" + "a = 15.0000000000000000 b=12.0000000000000000\n" + "a = 13.5000000000000000 b=13.4164078649987388\n" + "a = 13.4582039324993694 b=13.4581390309909850\n" + "a = 13.4581714817451772 b=13.4581714817060547\n" + "a = 13.4581714817256159 b=13.4581714817256159\n" + "\n" + "When array-like arguments are given, broadcasting applies:\n" + "\n" + ">>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).\n" + ">>> b = np.array([6, 12, 24, 48]) # b has shape (4,).\n" + ">>> agm(a, b)\n" + "array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],\n" + " [ 4.37037309, 6.72908574, 10.84726853, 18.11597502],\n" + " [ 6. , 8.74074619, 13.45817148, 21.69453707]])") +ufunc_agm_loops[0] = loop_d_dd__As_ff_f +ufunc_agm_loops[1] = loop_d_dd__As_dd_d +ufunc_agm_types[0] = NPY_FLOAT +ufunc_agm_types[1] = NPY_FLOAT +ufunc_agm_types[2] = NPY_FLOAT +ufunc_agm_types[3] = NPY_DOUBLE +ufunc_agm_types[4] = NPY_DOUBLE +ufunc_agm_types[5] = NPY_DOUBLE +ufunc_agm_ptr[2*0] = _func_agm +ufunc_agm_ptr[2*0+1] = ("agm") +ufunc_agm_ptr[2*1] = _func_agm +ufunc_agm_ptr[2*1+1] = ("agm") +ufunc_agm_data[0] = &ufunc_agm_ptr[2*0] +ufunc_agm_data[1] = &ufunc_agm_ptr[2*1] +agm = np.PyUFunc_FromFuncAndData(ufunc_agm_loops, ufunc_agm_data, ufunc_agm_types, 2, 2, 1, 0, "agm", ufunc_agm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtr_loops[3] +cdef void *ufunc_bdtr_ptr[6] +cdef void *ufunc_bdtr_data[3] +cdef char ufunc_bdtr_types[12] +cdef char *ufunc_bdtr_doc = ( + "bdtr(k, n, p, out=None)\n" + "\n" + "Binomial distribution cumulative distribution function.\n" + "\n" + "Sum of the terms 0 through `floor(k)` of the Binomial probability density.\n" + "\n" + ".. math::\n" + " \\mathrm{bdtr}(k, n, p) =\n" + " \\sum_{j=0}^{\\lfloor k \\rfloor} {{n}\\choose{j}} p^j (1-p)^{n-j}\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (double), rounded down to the nearest integer.\n" + "n : array_like\n" + " Number of events (int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Probability of `floor(k)` or fewer successes in `n` independent events with\n" + " success probabilities of `p`.\n" + "\n" + "Notes\n" + "-----\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{bdtr}(k, n, p) =\n" + " I_{1 - p}(n - \\lfloor k \\rfloor, \\lfloor k \\rfloor + 1).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `bdtr`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_bdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtr_loops[1] = loop_d_dpd__As_dpd_d +ufunc_bdtr_loops[2] = loop_d_ddd__As_ddd_d +ufunc_bdtr_types[0] = NPY_FLOAT +ufunc_bdtr_types[1] = NPY_FLOAT +ufunc_bdtr_types[2] = NPY_FLOAT +ufunc_bdtr_types[3] = NPY_FLOAT +ufunc_bdtr_types[4] = NPY_DOUBLE +ufunc_bdtr_types[5] = NPY_INTP +ufunc_bdtr_types[6] = NPY_DOUBLE +ufunc_bdtr_types[7] = NPY_DOUBLE +ufunc_bdtr_types[8] = NPY_DOUBLE +ufunc_bdtr_types[9] = NPY_DOUBLE +ufunc_bdtr_types[10] = NPY_DOUBLE +ufunc_bdtr_types[11] = NPY_DOUBLE +ufunc_bdtr_ptr[2*0] = _func_bdtr_unsafe +ufunc_bdtr_ptr[2*0+1] = ("bdtr") +ufunc_bdtr_ptr[2*1] = _func_cephes_bdtr_wrap +ufunc_bdtr_ptr[2*1+1] = ("bdtr") +ufunc_bdtr_ptr[2*2] = _func_bdtr_unsafe +ufunc_bdtr_ptr[2*2+1] = ("bdtr") +ufunc_bdtr_data[0] = &ufunc_bdtr_ptr[2*0] +ufunc_bdtr_data[1] = &ufunc_bdtr_ptr[2*1] +ufunc_bdtr_data[2] = &ufunc_bdtr_ptr[2*2] +bdtr = np.PyUFunc_FromFuncAndData(ufunc_bdtr_loops, ufunc_bdtr_data, ufunc_bdtr_types, 3, 3, 1, 0, "bdtr", ufunc_bdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtrc_loops[3] +cdef void *ufunc_bdtrc_ptr[6] +cdef void *ufunc_bdtrc_data[3] +cdef char ufunc_bdtrc_types[12] +cdef char *ufunc_bdtrc_doc = ( + "bdtrc(k, n, p, out=None)\n" + "\n" + "Binomial distribution survival function.\n" + "\n" + "Sum of the terms `floor(k) + 1` through `n` of the binomial probability\n" + "density,\n" + "\n" + ".. math::\n" + " \\mathrm{bdtrc}(k, n, p) =\n" + " \\sum_{j=\\lfloor k \\rfloor +1}^n {{n}\\choose{j}} p^j (1-p)^{n-j}\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (double), rounded down to nearest integer.\n" + "n : array_like\n" + " Number of events (int)\n" + "p : array_like\n" + " Probability of success in a single event.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Probability of `floor(k) + 1` or more successes in `n` independent\n" + " events with success probabilities of `p`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "betainc\n" + "\n" + "Notes\n" + "-----\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{bdtrc}(k, n, p) = I_{p}(\\lfloor k \\rfloor + 1, n - \\lfloor k \\rfloor).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `bdtrc`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_bdtrc_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtrc_loops[1] = loop_d_dpd__As_dpd_d +ufunc_bdtrc_loops[2] = loop_d_ddd__As_ddd_d +ufunc_bdtrc_types[0] = NPY_FLOAT +ufunc_bdtrc_types[1] = NPY_FLOAT +ufunc_bdtrc_types[2] = NPY_FLOAT +ufunc_bdtrc_types[3] = NPY_FLOAT +ufunc_bdtrc_types[4] = NPY_DOUBLE +ufunc_bdtrc_types[5] = NPY_INTP +ufunc_bdtrc_types[6] = NPY_DOUBLE +ufunc_bdtrc_types[7] = NPY_DOUBLE +ufunc_bdtrc_types[8] = NPY_DOUBLE +ufunc_bdtrc_types[9] = NPY_DOUBLE +ufunc_bdtrc_types[10] = NPY_DOUBLE +ufunc_bdtrc_types[11] = NPY_DOUBLE +ufunc_bdtrc_ptr[2*0] = _func_bdtrc_unsafe +ufunc_bdtrc_ptr[2*0+1] = ("bdtrc") +ufunc_bdtrc_ptr[2*1] = _func_cephes_bdtrc_wrap +ufunc_bdtrc_ptr[2*1+1] = ("bdtrc") +ufunc_bdtrc_ptr[2*2] = _func_bdtrc_unsafe +ufunc_bdtrc_ptr[2*2+1] = ("bdtrc") +ufunc_bdtrc_data[0] = &ufunc_bdtrc_ptr[2*0] +ufunc_bdtrc_data[1] = &ufunc_bdtrc_ptr[2*1] +ufunc_bdtrc_data[2] = &ufunc_bdtrc_ptr[2*2] +bdtrc = np.PyUFunc_FromFuncAndData(ufunc_bdtrc_loops, ufunc_bdtrc_data, ufunc_bdtrc_types, 3, 3, 1, 0, "bdtrc", ufunc_bdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtri_loops[3] +cdef void *ufunc_bdtri_ptr[6] +cdef void *ufunc_bdtri_data[3] +cdef char ufunc_bdtri_types[12] +cdef char *ufunc_bdtri_doc = ( + "bdtri(k, n, y, out=None)\n" + "\n" + "Inverse function to `bdtr` with respect to `p`.\n" + "\n" + "Finds the event probability `p` such that the sum of the terms 0 through\n" + "`k` of the binomial probability density is equal to the given cumulative\n" + "probability `y`.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (float), rounded down to the nearest integer.\n" + "n : array_like\n" + " Number of events (float)\n" + "y : array_like\n" + " Cumulative probability (probability of `k` or fewer successes in `n`\n" + " events).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "p : scalar or ndarray\n" + " The event probability such that `bdtr(\\lfloor k \\rfloor, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "betaincinv\n" + "\n" + "Notes\n" + "-----\n" + "The computation is carried out using the inverse beta integral function\n" + "and the relation,::\n" + "\n" + " 1 - p = betaincinv(n - k, k + 1, y).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `bdtri`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_bdtri_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtri_loops[1] = loop_d_dpd__As_dpd_d +ufunc_bdtri_loops[2] = loop_d_ddd__As_ddd_d +ufunc_bdtri_types[0] = NPY_FLOAT +ufunc_bdtri_types[1] = NPY_FLOAT +ufunc_bdtri_types[2] = NPY_FLOAT +ufunc_bdtri_types[3] = NPY_FLOAT +ufunc_bdtri_types[4] = NPY_DOUBLE +ufunc_bdtri_types[5] = NPY_INTP +ufunc_bdtri_types[6] = NPY_DOUBLE +ufunc_bdtri_types[7] = NPY_DOUBLE +ufunc_bdtri_types[8] = NPY_DOUBLE +ufunc_bdtri_types[9] = NPY_DOUBLE +ufunc_bdtri_types[10] = NPY_DOUBLE +ufunc_bdtri_types[11] = NPY_DOUBLE +ufunc_bdtri_ptr[2*0] = _func_bdtri_unsafe +ufunc_bdtri_ptr[2*0+1] = ("bdtri") +ufunc_bdtri_ptr[2*1] = _func_cephes_bdtri_wrap +ufunc_bdtri_ptr[2*1+1] = ("bdtri") +ufunc_bdtri_ptr[2*2] = _func_bdtri_unsafe +ufunc_bdtri_ptr[2*2+1] = ("bdtri") +ufunc_bdtri_data[0] = &ufunc_bdtri_ptr[2*0] +ufunc_bdtri_data[1] = &ufunc_bdtri_ptr[2*1] +ufunc_bdtri_data[2] = &ufunc_bdtri_ptr[2*2] +bdtri = np.PyUFunc_FromFuncAndData(ufunc_bdtri_loops, ufunc_bdtri_data, ufunc_bdtri_types, 3, 3, 1, 0, "bdtri", ufunc_bdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtrik_loops[2] +cdef void *ufunc_bdtrik_ptr[4] +cdef void *ufunc_bdtrik_data[2] +cdef char ufunc_bdtrik_types[8] +cdef char *ufunc_bdtrik_doc = ( + "bdtrik(y, n, p, out=None)\n" + "\n" + "Inverse function to `bdtr` with respect to `k`.\n" + "\n" + "Finds the number of successes `k` such that the sum of the terms 0 through\n" + "`k` of the Binomial probability density for `n` events with probability\n" + "`p` is equal to the given cumulative probability `y`.\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + " Cumulative probability (probability of `k` or fewer successes in `n`\n" + " events).\n" + "n : array_like\n" + " Number of events (float).\n" + "p : array_like\n" + " Success probability (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "k : scalar or ndarray\n" + " The number of successes `k` such that `bdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "\n" + "Notes\n" + "-----\n" + "Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the\n" + "cumulative incomplete beta distribution.\n" + "\n" + "Computation of `k` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `k`.\n" + "\n" + "Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. 
[2] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.") +ufunc_bdtrik_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtrik_loops[1] = loop_d_ddd__As_ddd_d +ufunc_bdtrik_types[0] = NPY_FLOAT +ufunc_bdtrik_types[1] = NPY_FLOAT +ufunc_bdtrik_types[2] = NPY_FLOAT +ufunc_bdtrik_types[3] = NPY_FLOAT +ufunc_bdtrik_types[4] = NPY_DOUBLE +ufunc_bdtrik_types[5] = NPY_DOUBLE +ufunc_bdtrik_types[6] = NPY_DOUBLE +ufunc_bdtrik_types[7] = NPY_DOUBLE +ufunc_bdtrik_ptr[2*0] = _func_bdtrik +ufunc_bdtrik_ptr[2*0+1] = ("bdtrik") +ufunc_bdtrik_ptr[2*1] = _func_bdtrik +ufunc_bdtrik_ptr[2*1+1] = ("bdtrik") +ufunc_bdtrik_data[0] = &ufunc_bdtrik_ptr[2*0] +ufunc_bdtrik_data[1] = &ufunc_bdtrik_ptr[2*1] +bdtrik = np.PyUFunc_FromFuncAndData(ufunc_bdtrik_loops, ufunc_bdtrik_data, ufunc_bdtrik_types, 2, 3, 1, 0, "bdtrik", ufunc_bdtrik_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtrin_loops[2] +cdef void *ufunc_bdtrin_ptr[4] +cdef void *ufunc_bdtrin_data[2] +cdef char ufunc_bdtrin_types[8] +cdef char *ufunc_bdtrin_doc = ( + "bdtrin(k, y, p, out=None)\n" + "\n" + "Inverse function to `bdtr` with respect to `n`.\n" + "\n" + "Finds the number of events `n` such that the sum of the terms 0 through\n" + "`k` of the Binomial probability density for events with probability `p` is\n" + "equal to the given cumulative probability `y`.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (float).\n" + "y : array_like\n" + " Cumulative probability (probability of `k` or fewer successes in `n`\n" + " events).\n" + "p : array_like\n" + " Success probability (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "n : scalar or ndarray\n" + " The number of events `n` such that `bdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "\n" + "Notes\n" + "-----\n" + "Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the\n" + "cumulative incomplete beta distribution.\n" + "\n" + "Computation of `n` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `n`.\n" + "\n" + "Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. 
[2] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.") +ufunc_bdtrin_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtrin_loops[1] = loop_d_ddd__As_ddd_d +ufunc_bdtrin_types[0] = NPY_FLOAT +ufunc_bdtrin_types[1] = NPY_FLOAT +ufunc_bdtrin_types[2] = NPY_FLOAT +ufunc_bdtrin_types[3] = NPY_FLOAT +ufunc_bdtrin_types[4] = NPY_DOUBLE +ufunc_bdtrin_types[5] = NPY_DOUBLE +ufunc_bdtrin_types[6] = NPY_DOUBLE +ufunc_bdtrin_types[7] = NPY_DOUBLE +ufunc_bdtrin_ptr[2*0] = _func_bdtrin +ufunc_bdtrin_ptr[2*0+1] = ("bdtrin") +ufunc_bdtrin_ptr[2*1] = _func_bdtrin +ufunc_bdtrin_ptr[2*1+1] = ("bdtrin") +ufunc_bdtrin_data[0] = &ufunc_bdtrin_ptr[2*0] +ufunc_bdtrin_data[1] = &ufunc_bdtrin_ptr[2*1] +bdtrin = np.PyUFunc_FromFuncAndData(ufunc_bdtrin_loops, ufunc_bdtrin_data, ufunc_bdtrin_types, 2, 3, 1, 0, "bdtrin", ufunc_bdtrin_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_besselpoly_loops[2] +cdef void *ufunc_besselpoly_ptr[4] +cdef void *ufunc_besselpoly_data[2] +cdef char ufunc_besselpoly_types[8] +cdef char *ufunc_besselpoly_doc = ( + "besselpoly(a, lmb, nu, out=None)\n" + "\n" + "Weighted integral of the Bessel function of the first kind.\n" + "\n" + "Computes\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^1 x^\\lambda J_\\nu(2 a x) \\, dx\n" + "\n" + "where :math:`J_\\nu` is a Bessel function and :math:`\\lambda=lmb`,\n" + ":math:`\\nu=nu`.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Scale factor inside the Bessel function.\n" + "lmb : array_like\n" + " Power of `x`\n" + "nu : array_like\n" + " Order of the Bessel function.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the integral.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function for one parameter set.\n" + "\n" + ">>> from scipy.special import besselpoly\n" + ">>> besselpoly(1, 1, 1)\n" + "0.24449718372863877\n" + "\n" + "Evaluate the function for different scale factors.\n" + "\n" + ">>> import numpy as np\n" + ">>> factors = np.array([0., 3., 6.])\n" + ">>> besselpoly(factors, 1, 1)\n" + "array([ 0. , -0.00549029, 0.00140174])\n" + "\n" + "Plot the function for varying powers, orders and scales.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> powers = np.linspace(0, 10, 100)\n" + ">>> orders = [1, 2, 3]\n" + ">>> scales = [1, 2]\n" + ">>> all_combinations = [(order, scale) for order in orders\n" + "... for scale in scales]\n" + ">>> for order, scale in all_combinations:\n" + "... ax.plot(powers, besselpoly(scale, powers, order),\n" + "... 
label=rf\"$\\nu={order}, a={scale}$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(r\"$\\lambda$\")\n" + ">>> ax.set_ylabel(r\"$\\int_0^1 x^{\\lambda} J_{\\nu}(2ax)\\,dx$\")\n" + ">>> plt.show()") +ufunc_besselpoly_loops[0] = loop_d_ddd__As_fff_f +ufunc_besselpoly_loops[1] = loop_d_ddd__As_ddd_d +ufunc_besselpoly_types[0] = NPY_FLOAT +ufunc_besselpoly_types[1] = NPY_FLOAT +ufunc_besselpoly_types[2] = NPY_FLOAT +ufunc_besselpoly_types[3] = NPY_FLOAT +ufunc_besselpoly_types[4] = NPY_DOUBLE +ufunc_besselpoly_types[5] = NPY_DOUBLE +ufunc_besselpoly_types[6] = NPY_DOUBLE +ufunc_besselpoly_types[7] = NPY_DOUBLE +ufunc_besselpoly_ptr[2*0] = _func_cephes_besselpoly +ufunc_besselpoly_ptr[2*0+1] = ("besselpoly") +ufunc_besselpoly_ptr[2*1] = _func_cephes_besselpoly +ufunc_besselpoly_ptr[2*1+1] = ("besselpoly") +ufunc_besselpoly_data[0] = &ufunc_besselpoly_ptr[2*0] +ufunc_besselpoly_data[1] = &ufunc_besselpoly_ptr[2*1] +besselpoly = np.PyUFunc_FromFuncAndData(ufunc_besselpoly_loops, ufunc_besselpoly_data, ufunc_besselpoly_types, 2, 3, 1, 0, "besselpoly", ufunc_besselpoly_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_beta_loops[2] +cdef void *ufunc_beta_ptr[4] +cdef void *ufunc_beta_data[2] +cdef char ufunc_beta_types[6] +cdef char *ufunc_beta_doc = ( + "beta(a, b, out=None)\n" + "\n" + "Beta function.\n" + "\n" + "This function is defined in [1]_ as\n" + "\n" + ".. math::\n" + "\n" + " B(a, b) = \\int_0^1 t^{a-1}(1-t)^{b-1}dt\n" + " = \\frac{\\Gamma(a)\\Gamma(b)}{\\Gamma(a+b)},\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real-valued arguments\n" + "out : ndarray, optional\n" + " Optional output array for the function result\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the beta function\n" + "\n" + "See Also\n" + "--------\n" + "gamma : the gamma function\n" + "betainc : the regularized incomplete beta function\n" + "betaln : the natural logarithm of the absolute\n" + " value of the beta function\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions,\n" + " Eq. 5.12.1. 
https://dlmf.nist.gov/5.12\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "The beta function relates to the gamma function by the\n" + "definition given above:\n" + "\n" + ">>> sc.beta(2, 3)\n" + "0.08333333333333333\n" + ">>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)\n" + "0.08333333333333333\n" + "\n" + "As this relationship demonstrates, the beta function\n" + "is symmetric:\n" + "\n" + ">>> sc.beta(1.7, 2.4)\n" + "0.16567527689031739\n" + ">>> sc.beta(2.4, 1.7)\n" + "0.16567527689031739\n" + "\n" + "This function satisfies :math:`B(1, b) = 1/b`:\n" + "\n" + ">>> sc.beta(1, 4)\n" + "0.25") +ufunc_beta_loops[0] = loop_d_dd__As_ff_f +ufunc_beta_loops[1] = loop_d_dd__As_dd_d +ufunc_beta_types[0] = NPY_FLOAT +ufunc_beta_types[1] = NPY_FLOAT +ufunc_beta_types[2] = NPY_FLOAT +ufunc_beta_types[3] = NPY_DOUBLE +ufunc_beta_types[4] = NPY_DOUBLE +ufunc_beta_types[5] = NPY_DOUBLE +ufunc_beta_ptr[2*0] = _func_cephes_beta +ufunc_beta_ptr[2*0+1] = ("beta") +ufunc_beta_ptr[2*1] = _func_cephes_beta +ufunc_beta_ptr[2*1+1] = ("beta") +ufunc_beta_data[0] = &ufunc_beta_ptr[2*0] +ufunc_beta_data[1] = &ufunc_beta_ptr[2*1] +beta = np.PyUFunc_FromFuncAndData(ufunc_beta_loops, ufunc_beta_data, ufunc_beta_types, 2, 2, 1, 0, "beta", ufunc_beta_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betainc_loops[2] +cdef void *ufunc_betainc_ptr[4] +cdef void *ufunc_betainc_data[2] +cdef char ufunc_betainc_types[8] +cdef char *ufunc_betainc_doc = ( + "betainc(a, b, x, out=None)\n" + "\n" + "Regularized incomplete beta function.\n" + "\n" + "Computes the regularized incomplete beta function, defined as [1]_:\n" + "\n" + ".. math::\n" + "\n" + " I_x(a, b) = \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)} \\int_0^x\n" + " t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "for :math:`0 \\leq x \\leq 1`.\n" + "\n" + "This function is the cumulative distribution function for the beta\n" + "distribution; its range is [0, 1].\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "x : array_like\n" + " Real-valued such that :math:`0 \\leq x \\leq 1`,\n" + " the upper limit of integration\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "beta : beta function\n" + "betaincinv : inverse of the regularized incomplete beta function\n" + "betaincc : complement of the regularized incomplete beta function\n" + "scipy.stats.beta : beta distribution\n" + "\n" + "Notes\n" + "-----\n" + "The term *regularized* in the name of this function refers to the\n" + "scaling of the function by the gamma function terms shown in the\n" + "formula. When not qualified as *regularized*, the name *incomplete\n" + "beta function* often refers to just the integral expression,\n" + "without the gamma terms. One can use the function `beta` from\n" + "`scipy.special` to get this \"nonregularized\" incomplete beta\n" + "function by multiplying the result of ``betainc(a, b, x)`` by\n" + "``beta(a, b)``.\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + "\n" + "Let :math:`B(a, b)` be the `beta` function.\n" + "\n" + ">>> import scipy.special as sc\n" + "\n" + "The coefficient in terms of `gamma` is equal to\n" + ":math:`1/B(a, b)`. 
Also, when :math:`x=1`\n" + "the integral is equal to :math:`B(a, b)`.\n" + "Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.\n" + "\n" + ">>> sc.betainc(0.2, 3.5, 1.0)\n" + "1.0\n" + "\n" + "It satisfies\n" + ":math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,\n" + "where :math:`F` is the hypergeometric function `hyp2f1`:\n" + "\n" + ">>> a, b, x = 1.4, 3.1, 0.5\n" + ">>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))\n" + "0.8148904036225295\n" + ">>> sc.betainc(a, b, x)\n" + "0.8148904036225296\n" + "\n" + "This function satisfies the relationship\n" + ":math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:\n" + "\n" + ">>> sc.betainc(2.2, 3.1, 0.4)\n" + "0.49339638807619446\n" + ">>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)\n" + "0.49339638807619446") +ufunc_betainc_loops[0] = loop_f_fff__As_fff_f +ufunc_betainc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betainc_types[0] = NPY_FLOAT +ufunc_betainc_types[1] = NPY_FLOAT +ufunc_betainc_types[2] = NPY_FLOAT +ufunc_betainc_types[3] = NPY_FLOAT +ufunc_betainc_types[4] = NPY_DOUBLE +ufunc_betainc_types[5] = NPY_DOUBLE +ufunc_betainc_types[6] = NPY_DOUBLE +ufunc_betainc_types[7] = NPY_DOUBLE +ufunc_betainc_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibeta_float +ufunc_betainc_ptr[2*0+1] = ("betainc") +ufunc_betainc_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibeta_double +ufunc_betainc_ptr[2*1+1] = ("betainc") +ufunc_betainc_data[0] = &ufunc_betainc_ptr[2*0] +ufunc_betainc_data[1] = &ufunc_betainc_ptr[2*1] +betainc = np.PyUFunc_FromFuncAndData(ufunc_betainc_loops, ufunc_betainc_data, ufunc_betainc_types, 2, 3, 1, 0, "betainc", ufunc_betainc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betaincc_loops[2] +cdef void *ufunc_betaincc_ptr[4] +cdef void *ufunc_betaincc_data[2] +cdef char ufunc_betaincc_types[8] +cdef char *ufunc_betaincc_doc = ( + "betaincc(a, b, x, out=None)\n" + "\n" + "Complement of the regularized incomplete beta function.\n" + "\n" + "Computes the complement of the regularized incomplete beta function,\n" + "defined as [1]_:\n" + "\n" + ".. math::\n" + "\n" + " \\bar{I}_x(a, b) = 1 - I_x(a, b)\n" + " = 1 - \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)} \\int_0^x\n" + " t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "for :math:`0 \\leq x \\leq 1`.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "x : array_like\n" + " Real-valued such that :math:`0 \\leq x \\leq 1`,\n" + " the upper limit of integration\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the complement of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "betainc : regularized incomplete beta function\n" + "betaincinv : inverse of the regularized incomplete beta function\n" + "betainccinv :\n" + " inverse of the complement of the regularized incomplete beta function\n" + "beta : beta function\n" + "scipy.stats.beta : beta distribution\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 1.11.0\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import betaincc, betainc\n" + "\n" + "The naive calculation ``1 - betainc(a, b, x)`` loses precision when\n" + "the values of ``betainc(a, b, x)`` are close to 1:\n" + "\n" + ">>> 1 - betainc(0.5, 8, [0.9, 0.99, 0.999])\n" + "array([2.0574632e-09, 0.0000000e+00, 0.0000000e+00])\n" + "\n" + "By using ``betaincc``, we get the correct values:\n" + "\n" + ">>> betaincc(0.5, 8, [0.9, 0.99, 0.999])\n" + "array([2.05746321e-09, 1.97259354e-17, 1.96467954e-25])") +ufunc_betaincc_loops[0] = loop_f_fff__As_fff_f +ufunc_betaincc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betaincc_types[0] = NPY_FLOAT +ufunc_betaincc_types[1] = NPY_FLOAT +ufunc_betaincc_types[2] = NPY_FLOAT +ufunc_betaincc_types[3] = NPY_FLOAT +ufunc_betaincc_types[4] = NPY_DOUBLE +ufunc_betaincc_types[5] = NPY_DOUBLE +ufunc_betaincc_types[6] = NPY_DOUBLE +ufunc_betaincc_types[7] = NPY_DOUBLE +ufunc_betaincc_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibetac_float +ufunc_betaincc_ptr[2*0+1] = ("betaincc") +ufunc_betaincc_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibetac_double +ufunc_betaincc_ptr[2*1+1] = ("betaincc") +ufunc_betaincc_data[0] = &ufunc_betaincc_ptr[2*0] +ufunc_betaincc_data[1] = &ufunc_betaincc_ptr[2*1] +betaincc = np.PyUFunc_FromFuncAndData(ufunc_betaincc_loops, ufunc_betaincc_data, ufunc_betaincc_types, 2, 3, 1, 0, "betaincc", ufunc_betaincc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betainccinv_loops[2] +cdef void *ufunc_betainccinv_ptr[4] +cdef void *ufunc_betainccinv_data[2] +cdef char ufunc_betainccinv_types[8] +cdef char *ufunc_betainccinv_doc = ( + "betainccinv(a, b, y, out=None)\n" + "\n" + "Inverse of the complemented regularized incomplete beta function.\n" + "\n" + "Computes :math:`x` such that:\n" + "\n" + ".. math::\n" + "\n" + " y = 1 - I_x(a, b) = 1 - \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}\n" + " \\int_0^x t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "where :math:`I_x` is the normalized incomplete beta function `betainc`\n" + "and :math:`\\Gamma` is the `gamma` function [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "y : array_like\n" + " Real-valued input\n" + "out : ndarray, optional\n" + " Optional output array for function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the inverse of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "betainc : regularized incomplete beta function\n" + "betaincc : complement of the regularized incomplete beta function\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 1.11.0\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import betainccinv, betaincc\n" + "\n" + "This function is the inverse of `betaincc` for fixed\n" + "values of :math:`a` and :math:`b`.\n" + "\n" + ">>> a, b = 1.2, 3.1\n" + ">>> y = betaincc(a, b, 0.2)\n" + ">>> betainccinv(a, b, y)\n" + "0.2\n" + "\n" + ">>> a, b = 7, 2.5\n" + ">>> x = betainccinv(a, b, 0.875)\n" + ">>> betaincc(a, b, x)\n" + "0.875") +ufunc_betainccinv_loops[0] = loop_f_fff__As_fff_f +ufunc_betainccinv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betainccinv_types[0] = NPY_FLOAT +ufunc_betainccinv_types[1] = NPY_FLOAT +ufunc_betainccinv_types[2] = NPY_FLOAT +ufunc_betainccinv_types[3] = NPY_FLOAT +ufunc_betainccinv_types[4] = NPY_DOUBLE +ufunc_betainccinv_types[5] = NPY_DOUBLE +ufunc_betainccinv_types[6] = NPY_DOUBLE +ufunc_betainccinv_types[7] = NPY_DOUBLE +ufunc_betainccinv_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibetac_inv_float +ufunc_betainccinv_ptr[2*0+1] = ("betainccinv") +ufunc_betainccinv_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibetac_inv_double +ufunc_betainccinv_ptr[2*1+1] = ("betainccinv") +ufunc_betainccinv_data[0] = &ufunc_betainccinv_ptr[2*0] +ufunc_betainccinv_data[1] = &ufunc_betainccinv_ptr[2*1] +betainccinv = np.PyUFunc_FromFuncAndData(ufunc_betainccinv_loops, ufunc_betainccinv_data, ufunc_betainccinv_types, 2, 3, 1, 0, "betainccinv", ufunc_betainccinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betaincinv_loops[2] +cdef void *ufunc_betaincinv_ptr[4] +cdef void *ufunc_betaincinv_data[2] +cdef char ufunc_betaincinv_types[8] +cdef char *ufunc_betaincinv_doc = ( + "betaincinv(a, b, y, out=None)\n" + "\n" + "Inverse of the regularized incomplete beta function.\n" + "\n" + "Computes :math:`x` such that:\n" + "\n" + ".. math::\n" + "\n" + " y = I_x(a, b) = \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}\n" + " \\int_0^x t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "where :math:`I_x` is the normalized incomplete beta function `betainc`\n" + "and :math:`\\Gamma` is the `gamma` function [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "y : array_like\n" + " Real-valued input\n" + "out : ndarray, optional\n" + " Optional output array for function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the inverse of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "betainc : regularized incomplete beta function\n" + "gamma : gamma function\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "This function is the inverse of `betainc` for fixed\n" + "values of :math:`a` and :math:`b`.\n" + "\n" + ">>> a, b = 1.2, 3.1\n" + ">>> y = sc.betainc(a, b, 0.2)\n" + ">>> sc.betaincinv(a, b, y)\n" + "0.2\n" + ">>>\n" + ">>> a, b = 7.5, 0.4\n" + ">>> x = sc.betaincinv(a, b, 0.5)\n" + ">>> sc.betainc(a, b, x)\n" + "0.5") +ufunc_betaincinv_loops[0] = loop_f_fff__As_fff_f +ufunc_betaincinv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betaincinv_types[0] = NPY_FLOAT +ufunc_betaincinv_types[1] = NPY_FLOAT +ufunc_betaincinv_types[2] = NPY_FLOAT +ufunc_betaincinv_types[3] = NPY_FLOAT +ufunc_betaincinv_types[4] = NPY_DOUBLE +ufunc_betaincinv_types[5] = NPY_DOUBLE +ufunc_betaincinv_types[6] = NPY_DOUBLE +ufunc_betaincinv_types[7] = NPY_DOUBLE +ufunc_betaincinv_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibeta_inv_float +ufunc_betaincinv_ptr[2*0+1] = ("betaincinv") +ufunc_betaincinv_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibeta_inv_double +ufunc_betaincinv_ptr[2*1+1] = ("betaincinv") +ufunc_betaincinv_data[0] = &ufunc_betaincinv_ptr[2*0] +ufunc_betaincinv_data[1] = &ufunc_betaincinv_ptr[2*1] +betaincinv = np.PyUFunc_FromFuncAndData(ufunc_betaincinv_loops, ufunc_betaincinv_data, ufunc_betaincinv_types, 2, 3, 1, 0, "betaincinv", ufunc_betaincinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betaln_loops[2] +cdef void *ufunc_betaln_ptr[4] +cdef void *ufunc_betaln_data[2] +cdef char ufunc_betaln_types[6] +cdef char *ufunc_betaln_doc = ( + "betaln(a, b, out=None)\n" + "\n" + "Natural logarithm of absolute value of beta function.\n" + "\n" + "Computes ``ln(abs(beta(a, b)))``.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "out : ndarray, optional\n" + " Optional output array for function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the betaln function\n" + "\n" + "See Also\n" + "--------\n" + "gamma : the gamma function\n" + "betainc : the regularized incomplete beta function\n" + "beta : the beta function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import betaln, beta\n" + "\n" + "Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)``\n" + "is the same as ``log(beta(a, b))``:\n" + "\n" + ">>> betaln(3, 4)\n" + "-4.0943445622221\n" + "\n" + ">>> np.log(beta(3, 4))\n" + "-4.0943445622221\n" + "\n" + "In the following ``beta(a, b)`` underflows to 0, so we can't compute\n" + "the logarithm of the actual value.\n" + "\n" + ">>> a = 400\n" + ">>> b = 900\n" + ">>> beta(a, b)\n" + "0.0\n" + "\n" + "We can compute the logarithm of ``beta(a, b)`` by using `betaln`:\n" + "\n" + ">>> betaln(a, b)\n" + "-804.3069951764146") +ufunc_betaln_loops[0] = loop_d_dd__As_ff_f +ufunc_betaln_loops[1] = loop_d_dd__As_dd_d +ufunc_betaln_types[0] = NPY_FLOAT +ufunc_betaln_types[1] = NPY_FLOAT +ufunc_betaln_types[2] = NPY_FLOAT +ufunc_betaln_types[3] = NPY_DOUBLE +ufunc_betaln_types[4] = NPY_DOUBLE +ufunc_betaln_types[5] = NPY_DOUBLE +ufunc_betaln_ptr[2*0] = _func_cephes_lbeta +ufunc_betaln_ptr[2*0+1] = ("betaln") +ufunc_betaln_ptr[2*1] = _func_cephes_lbeta +ufunc_betaln_ptr[2*1+1] = ("betaln") +ufunc_betaln_data[0] = &ufunc_betaln_ptr[2*0] +ufunc_betaln_data[1] = &ufunc_betaln_ptr[2*1] +betaln = np.PyUFunc_FromFuncAndData(ufunc_betaln_loops, ufunc_betaln_data, ufunc_betaln_types, 2, 2, 1, 0, 
"betaln", ufunc_betaln_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_boxcox_loops[2] +cdef void *ufunc_boxcox_ptr[4] +cdef void *ufunc_boxcox_data[2] +cdef char ufunc_boxcox_types[6] +cdef char *ufunc_boxcox_doc = ( + "boxcox(x, lmbda, out=None)\n" + "\n" + "Compute the Box-Cox transformation.\n" + "\n" + "The Box-Cox transformation is::\n" + "\n" + " y = (x**lmbda - 1) / lmbda if lmbda != 0\n" + " log(x) if lmbda == 0\n" + "\n" + "Returns `nan` if ``x < 0``.\n" + "Returns `-inf` if ``x == 0`` and ``lmbda < 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.14.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox\n" + ">>> boxcox([1, 4, 10], 2.5)\n" + "array([ 0. , 12.4 , 126.09110641])\n" + ">>> boxcox(2, [0, 1, 2])\n" + "array([ 0.69314718, 1. , 1.5 ])") +ufunc_boxcox_loops[0] = loop_d_dd__As_ff_f +ufunc_boxcox_loops[1] = loop_d_dd__As_dd_d +ufunc_boxcox_types[0] = NPY_FLOAT +ufunc_boxcox_types[1] = NPY_FLOAT +ufunc_boxcox_types[2] = NPY_FLOAT +ufunc_boxcox_types[3] = NPY_DOUBLE +ufunc_boxcox_types[4] = NPY_DOUBLE +ufunc_boxcox_types[5] = NPY_DOUBLE +ufunc_boxcox_ptr[2*0] = _func_boxcox +ufunc_boxcox_ptr[2*0+1] = ("boxcox") +ufunc_boxcox_ptr[2*1] = _func_boxcox +ufunc_boxcox_ptr[2*1+1] = ("boxcox") +ufunc_boxcox_data[0] = &ufunc_boxcox_ptr[2*0] +ufunc_boxcox_data[1] = &ufunc_boxcox_ptr[2*1] +boxcox = np.PyUFunc_FromFuncAndData(ufunc_boxcox_loops, ufunc_boxcox_data, ufunc_boxcox_types, 2, 2, 1, 0, "boxcox", ufunc_boxcox_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_boxcox1p_loops[2] +cdef void *ufunc_boxcox1p_ptr[4] +cdef void *ufunc_boxcox1p_data[2] +cdef char ufunc_boxcox1p_types[6] +cdef char *ufunc_boxcox1p_doc = ( + "boxcox1p(x, lmbda, out=None)\n" + "\n" + "Compute the Box-Cox transformation of 1 + `x`.\n" + "\n" + "The Box-Cox transformation computed by `boxcox1p` is::\n" + "\n" + " y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0\n" + " log(1+x) if lmbda == 0\n" + "\n" + "Returns `nan` if ``x < -1``.\n" + "Returns `-inf` if ``x == -1`` and ``lmbda < 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. 
versionadded:: 0.14.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox1p\n" + ">>> boxcox1p(1e-4, [0, 0.5, 1])\n" + "array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])\n" + ">>> boxcox1p([0.01, 0.1], 0.25)\n" + "array([ 0.00996272, 0.09645476])") +ufunc_boxcox1p_loops[0] = loop_d_dd__As_ff_f +ufunc_boxcox1p_loops[1] = loop_d_dd__As_dd_d +ufunc_boxcox1p_types[0] = NPY_FLOAT +ufunc_boxcox1p_types[1] = NPY_FLOAT +ufunc_boxcox1p_types[2] = NPY_FLOAT +ufunc_boxcox1p_types[3] = NPY_DOUBLE +ufunc_boxcox1p_types[4] = NPY_DOUBLE +ufunc_boxcox1p_types[5] = NPY_DOUBLE +ufunc_boxcox1p_ptr[2*0] = _func_boxcox1p +ufunc_boxcox1p_ptr[2*0+1] = ("boxcox1p") +ufunc_boxcox1p_ptr[2*1] = _func_boxcox1p +ufunc_boxcox1p_ptr[2*1+1] = ("boxcox1p") +ufunc_boxcox1p_data[0] = &ufunc_boxcox1p_ptr[2*0] +ufunc_boxcox1p_data[1] = &ufunc_boxcox1p_ptr[2*1] +boxcox1p = np.PyUFunc_FromFuncAndData(ufunc_boxcox1p_loops, ufunc_boxcox1p_data, ufunc_boxcox1p_types, 2, 2, 1, 0, "boxcox1p", ufunc_boxcox1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtr_loops[2] +cdef void *ufunc_btdtr_ptr[4] +cdef void *ufunc_btdtr_data[2] +cdef char ufunc_btdtr_types[8] +cdef char *ufunc_btdtr_doc = ( + "btdtr(a, b, x, out=None)\n" + "\n" + "Cumulative distribution function of the beta distribution.\n" + "\n" + "Returns the integral from zero to `x` of the beta probability density\n" + "function,\n" + "\n" + ".. math::\n" + " I = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + ".. deprecated:: 1.12.0\n" + " This function is deprecated and will be removed from SciPy 1.14.0.\n" + " Use `scipy.special.betainc` instead.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Shape parameter (a > 0).\n" + "b : array_like\n" + " Shape parameter (b > 0).\n" + "x : array_like\n" + " Upper limit of integration, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Cumulative distribution function of the beta distribution with\n" + " parameters `a` and `b` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "betainc\n" + "\n" + "Notes\n" + "-----\n" + "This function is identical to the incomplete beta integral function\n" + "`betainc`.\n" + "\n" + "Wrapper for the Cephes [1]_ routine `btdtr`.\n" + "\n" + "References\n" + "----------\n" + ".. 
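+# A minimal sketch contrasting the `boxcox1p` ufunc registered above with
+# the naive ``boxcox(1 + x, lmbda)`` for tiny ``x``, where ``1 + x`` rounds
+# to ``1.0`` in double precision and the information is lost
+# (illustrative values):
+#
+#     >>> from scipy.special import boxcox, boxcox1p
+#     >>> x, lmbda = 1e-17, 0.5
+#     >>> boxcox(1 + x, lmbda)
+#     0.0
+#     >>> bool(boxcox1p(x, lmbda) > 0.0)
+#     True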
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_btdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtr_types[0] = NPY_FLOAT +ufunc_btdtr_types[1] = NPY_FLOAT +ufunc_btdtr_types[2] = NPY_FLOAT +ufunc_btdtr_types[3] = NPY_FLOAT +ufunc_btdtr_types[4] = NPY_DOUBLE +ufunc_btdtr_types[5] = NPY_DOUBLE +ufunc_btdtr_types[6] = NPY_DOUBLE +ufunc_btdtr_types[7] = NPY_DOUBLE +ufunc_btdtr_ptr[2*0] = _func_cephes_btdtr +ufunc_btdtr_ptr[2*0+1] = ("btdtr") +ufunc_btdtr_ptr[2*1] = _func_cephes_btdtr +ufunc_btdtr_ptr[2*1+1] = ("btdtr") +ufunc_btdtr_data[0] = &ufunc_btdtr_ptr[2*0] +ufunc_btdtr_data[1] = &ufunc_btdtr_ptr[2*1] +btdtr = np.PyUFunc_FromFuncAndData(ufunc_btdtr_loops, ufunc_btdtr_data, ufunc_btdtr_types, 2, 3, 1, 0, "btdtr", ufunc_btdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtri_loops[2] +cdef void *ufunc_btdtri_ptr[4] +cdef void *ufunc_btdtri_data[2] +cdef char ufunc_btdtri_types[8] +cdef char *ufunc_btdtri_doc = ( + "btdtri(a, b, p, out=None)\n" + "\n" + "The `p`-th quantile of the beta distribution.\n" + "\n" + "This function is the inverse of the beta cumulative distribution function,\n" + "`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or\n" + "\n" + ".. math::\n" + " p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + ".. deprecated:: 1.12.0\n" + " This function is deprecated and will be removed from SciPy 1.14.0.\n" + " Use `scipy.special.betaincinv` instead.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Shape parameter (`a` > 0).\n" + "b : array_like\n" + " Shape parameter (`b` > 0).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " The quantile corresponding to `p`.\n" + "\n" + "See Also\n" + "--------\n" + "betaincinv\n" + "btdtr\n" + "\n" + "Notes\n" + "-----\n" + "The value of `x` is found by interval halving or Newton iterations.\n" + "\n" + "Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent\n" + "problem of finding the inverse of the incomplete beta integral.\n" + "\n" + "References\n" + "----------\n" + ".. 
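+# A minimal equivalence sketch for the (deprecated) `btdtr` ufunc
+# registered above, assuming the identity with `betainc` stated in its
+# docstring; recent SciPy versions may emit a DeprecationWarning here
+# (illustrative values):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import betainc, btdtr
+#     >>> a, b, x = 2.0, 5.0, 0.3
+#     >>> bool(np.isclose(btdtr(a, b, x), betainc(a, b, x)))
+#     True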
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_btdtri_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtri_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtri_types[0] = NPY_FLOAT +ufunc_btdtri_types[1] = NPY_FLOAT +ufunc_btdtri_types[2] = NPY_FLOAT +ufunc_btdtri_types[3] = NPY_FLOAT +ufunc_btdtri_types[4] = NPY_DOUBLE +ufunc_btdtri_types[5] = NPY_DOUBLE +ufunc_btdtri_types[6] = NPY_DOUBLE +ufunc_btdtri_types[7] = NPY_DOUBLE +ufunc_btdtri_ptr[2*0] = _func_cephes_btdtri +ufunc_btdtri_ptr[2*0+1] = ("btdtri") +ufunc_btdtri_ptr[2*1] = _func_cephes_btdtri +ufunc_btdtri_ptr[2*1+1] = ("btdtri") +ufunc_btdtri_data[0] = &ufunc_btdtri_ptr[2*0] +ufunc_btdtri_data[1] = &ufunc_btdtri_ptr[2*1] +btdtri = np.PyUFunc_FromFuncAndData(ufunc_btdtri_loops, ufunc_btdtri_data, ufunc_btdtri_types, 2, 3, 1, 0, "btdtri", ufunc_btdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtria_loops[2] +cdef void *ufunc_btdtria_ptr[4] +cdef void *ufunc_btdtria_data[2] +cdef char ufunc_btdtria_types[8] +cdef char *ufunc_btdtria_doc = ( + "btdtria(p, b, x, out=None)\n" + "\n" + "Inverse of `btdtr` with respect to `a`.\n" + "\n" + "This is the inverse of the beta cumulative distribution function, `btdtr`,\n" + "considered as a function of `a`, returning the value of `a` for which\n" + "`btdtr(a, b, x) = p`, or\n" + "\n" + ".. math::\n" + " p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "b : array_like\n" + " Shape parameter (`b` > 0).\n" + "x : array_like\n" + " The quantile, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "a : scalar or ndarray\n" + " The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.\n" + "\n" + "See Also\n" + "--------\n" + "btdtr : Cumulative distribution function of the beta distribution.\n" + "btdtri : Inverse with respect to `x`.\n" + "btdtrib : Inverse with respect to `b`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `a` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `a`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Algorithm 708: Significant Digit Computation of the Incomplete Beta\n" + " Function Ratios. ACM Trans. Math. Softw. 
18 (1993), 360-373.") +ufunc_btdtria_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtria_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtria_types[0] = NPY_FLOAT +ufunc_btdtria_types[1] = NPY_FLOAT +ufunc_btdtria_types[2] = NPY_FLOAT +ufunc_btdtria_types[3] = NPY_FLOAT +ufunc_btdtria_types[4] = NPY_DOUBLE +ufunc_btdtria_types[5] = NPY_DOUBLE +ufunc_btdtria_types[6] = NPY_DOUBLE +ufunc_btdtria_types[7] = NPY_DOUBLE +ufunc_btdtria_ptr[2*0] = _func_btdtria +ufunc_btdtria_ptr[2*0+1] = ("btdtria") +ufunc_btdtria_ptr[2*1] = _func_btdtria +ufunc_btdtria_ptr[2*1+1] = ("btdtria") +ufunc_btdtria_data[0] = &ufunc_btdtria_ptr[2*0] +ufunc_btdtria_data[1] = &ufunc_btdtria_ptr[2*1] +btdtria = np.PyUFunc_FromFuncAndData(ufunc_btdtria_loops, ufunc_btdtria_data, ufunc_btdtria_types, 2, 3, 1, 0, "btdtria", ufunc_btdtria_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtrib_loops[2] +cdef void *ufunc_btdtrib_ptr[4] +cdef void *ufunc_btdtrib_data[2] +cdef char ufunc_btdtrib_types[8] +cdef char *ufunc_btdtrib_doc = ( + "btdtrib(a, p, x, out=None)\n" + "\n" + "Inverse of `btdtr` with respect to `b`.\n" + "\n" + "This is the inverse of the beta cumulative distribution function, `btdtr`,\n" + "considered as a function of `b`, returning the value of `b` for which\n" + "`btdtr(a, b, x) = p`, or\n" + "\n" + ".. math::\n" + " p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Shape parameter (`a` > 0).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "x : array_like\n" + " The quantile, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "b : scalar or ndarray\n" + " The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.\n" + "\n" + "See Also\n" + "--------\n" + "btdtr : Cumulative distribution function of the beta distribution.\n" + "btdtri : Inverse with respect to `x`.\n" + "btdtria : Inverse with respect to `a`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `b` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `b`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Algorithm 708: Significant Digit Computation of the Incomplete Beta\n" + " Function Ratios. ACM Trans. Math. Softw. 
18 (1993), 360-373.") +ufunc_btdtrib_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtrib_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtrib_types[0] = NPY_FLOAT +ufunc_btdtrib_types[1] = NPY_FLOAT +ufunc_btdtrib_types[2] = NPY_FLOAT +ufunc_btdtrib_types[3] = NPY_FLOAT +ufunc_btdtrib_types[4] = NPY_DOUBLE +ufunc_btdtrib_types[5] = NPY_DOUBLE +ufunc_btdtrib_types[6] = NPY_DOUBLE +ufunc_btdtrib_types[7] = NPY_DOUBLE +ufunc_btdtrib_ptr[2*0] = _func_btdtrib +ufunc_btdtrib_ptr[2*0+1] = ("btdtrib") +ufunc_btdtrib_ptr[2*1] = _func_btdtrib +ufunc_btdtrib_ptr[2*1+1] = ("btdtrib") +ufunc_btdtrib_data[0] = &ufunc_btdtrib_ptr[2*0] +ufunc_btdtrib_data[1] = &ufunc_btdtrib_ptr[2*1] +btdtrib = np.PyUFunc_FromFuncAndData(ufunc_btdtrib_loops, ufunc_btdtrib_data, ufunc_btdtrib_types, 2, 3, 1, 0, "btdtrib", ufunc_btdtrib_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cbrt_loops[2] +cdef void *ufunc_cbrt_ptr[4] +cdef void *ufunc_cbrt_data[2] +cdef char ufunc_cbrt_types[4] +cdef char *ufunc_cbrt_doc = ( + "cbrt(x, out=None)\n" + "\n" + "Element-wise cube root of `x`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The cube root of each value in `x`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import cbrt\n" + "\n" + ">>> cbrt(8)\n" + "2.0\n" + ">>> cbrt([-8, -3, 0.125, 1.331])\n" + "array([-2. , -1.44224957, 0.5 , 1.1 ])") +ufunc_cbrt_loops[0] = loop_d_d__As_f_f +ufunc_cbrt_loops[1] = loop_d_d__As_d_d +ufunc_cbrt_types[0] = NPY_FLOAT +ufunc_cbrt_types[1] = NPY_FLOAT +ufunc_cbrt_types[2] = NPY_DOUBLE +ufunc_cbrt_types[3] = NPY_DOUBLE +ufunc_cbrt_ptr[2*0] = _func_cephes_cbrt +ufunc_cbrt_ptr[2*0+1] = ("cbrt") +ufunc_cbrt_ptr[2*1] = _func_cephes_cbrt +ufunc_cbrt_ptr[2*1+1] = ("cbrt") +ufunc_cbrt_data[0] = &ufunc_cbrt_ptr[2*0] +ufunc_cbrt_data[1] = &ufunc_cbrt_ptr[2*1] +cbrt = np.PyUFunc_FromFuncAndData(ufunc_cbrt_loops, ufunc_cbrt_data, ufunc_cbrt_types, 2, 1, 1, 0, "cbrt", ufunc_cbrt_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtr_loops[2] +cdef void *ufunc_chdtr_ptr[4] +cdef void *ufunc_chdtr_data[2] +cdef char ufunc_chdtr_types[6] +cdef char *ufunc_chdtr_doc = ( + "chdtr(v, x, out=None)\n" + "\n" + "Chi square cumulative distribution function.\n" + "\n" + "Returns the area under the left tail (from 0 to `x`) of the Chi\n" + "square probability density function with `v` degrees of freedom:\n" + "\n" + ".. math::\n" + "\n" + " \\frac{1}{2^{v/2} \\Gamma(v/2)} \\int_0^x t^{v/2 - 1} e^{-t/2} dt\n" + "\n" + "Here :math:`\\Gamma` is the Gamma function; see `gamma`. This\n" + "integral can be expressed in terms of the regularized lower\n" + "incomplete gamma function `gammainc` as\n" + "``gammainc(v / 2, x / 2)``. [1]_\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Degrees of freedom.\n" + "x : array_like\n" + " Upper bound of the integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the cumulative distribution function.\n" + "\n" + "See Also\n" + "--------\n" + "chdtrc, chdtri, chdtriv, gammainc\n" + "\n" + "References\n" + "----------\n" + ".. 
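+# A minimal round-trip sketch for the `btdtria` and `btdtrib` ufuncs
+# registered above: each searches for the shape parameter that reproduces
+# a given value of ``btdtr(a, b, x)``, so both should recover the inputs
+# up to the search tolerance (illustrative values):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import btdtr, btdtria, btdtrib
+#     >>> a, b, x = 2.0, 3.0, 0.4
+#     >>> p = btdtr(a, b, x)
+#     >>> bool(np.isclose(btdtria(p, b, x), a))
+#     True
+#     >>> bool(np.isclose(btdtrib(a, p, x), b))
+#     True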
[1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It can be expressed in terms of the regularized lower incomplete\n" + "gamma function.\n" + "\n" + ">>> v = 1\n" + ">>> x = np.arange(4)\n" + ">>> sc.chdtr(v, x)\n" + "array([0. , 0.68268949, 0.84270079, 0.91673548])\n" + ">>> sc.gammainc(v / 2, x / 2)\n" + "array([0. , 0.68268949, 0.84270079, 0.91673548])") +ufunc_chdtr_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtr_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtr_types[0] = NPY_FLOAT +ufunc_chdtr_types[1] = NPY_FLOAT +ufunc_chdtr_types[2] = NPY_FLOAT +ufunc_chdtr_types[3] = NPY_DOUBLE +ufunc_chdtr_types[4] = NPY_DOUBLE +ufunc_chdtr_types[5] = NPY_DOUBLE +ufunc_chdtr_ptr[2*0] = _func_cephes_chdtr +ufunc_chdtr_ptr[2*0+1] = ("chdtr") +ufunc_chdtr_ptr[2*1] = _func_cephes_chdtr +ufunc_chdtr_ptr[2*1+1] = ("chdtr") +ufunc_chdtr_data[0] = &ufunc_chdtr_ptr[2*0] +ufunc_chdtr_data[1] = &ufunc_chdtr_ptr[2*1] +chdtr = np.PyUFunc_FromFuncAndData(ufunc_chdtr_loops, ufunc_chdtr_data, ufunc_chdtr_types, 2, 2, 1, 0, "chdtr", ufunc_chdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtrc_loops[2] +cdef void *ufunc_chdtrc_ptr[4] +cdef void *ufunc_chdtrc_data[2] +cdef char ufunc_chdtrc_types[6] +cdef char *ufunc_chdtrc_doc = ( + "chdtrc(v, x, out=None)\n" + "\n" + "Chi square survival function.\n" + "\n" + "Returns the area under the right hand tail (from `x` to infinity)\n" + "of the Chi square probability density function with `v` degrees of\n" + "freedom:\n" + "\n" + ".. math::\n" + "\n" + " \\frac{1}{2^{v/2} \\Gamma(v/2)} \\int_x^\\infty t^{v/2 - 1} e^{-t/2} dt\n" + "\n" + "Here :math:`\\Gamma` is the Gamma function; see `gamma`. This\n" + "integral can be expressed in terms of the regularized upper\n" + "incomplete gamma function `gammaincc` as\n" + "``gammaincc(v / 2, x / 2)``. [1]_\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Degrees of freedom.\n" + "x : array_like\n" + " Lower bound of the integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the survival function.\n" + "\n" + "See Also\n" + "--------\n" + "chdtr, chdtri, chdtriv, gammaincc\n" + "\n" + "References\n" + "----------\n" + ".. [1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It can be expressed in terms of the regularized upper incomplete\n" + "gamma function.\n" + "\n" + ">>> v = 1\n" + ">>> x = np.arange(4)\n" + ">>> sc.chdtrc(v, x)\n" + "array([1. , 0.31731051, 0.15729921, 0.08326452])\n" + ">>> sc.gammaincc(v / 2, x / 2)\n" + "array([1. 
, 0.31731051, 0.15729921, 0.08326452])") +ufunc_chdtrc_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtrc_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtrc_types[0] = NPY_FLOAT +ufunc_chdtrc_types[1] = NPY_FLOAT +ufunc_chdtrc_types[2] = NPY_FLOAT +ufunc_chdtrc_types[3] = NPY_DOUBLE +ufunc_chdtrc_types[4] = NPY_DOUBLE +ufunc_chdtrc_types[5] = NPY_DOUBLE +ufunc_chdtrc_ptr[2*0] = _func_cephes_chdtrc +ufunc_chdtrc_ptr[2*0+1] = ("chdtrc") +ufunc_chdtrc_ptr[2*1] = _func_cephes_chdtrc +ufunc_chdtrc_ptr[2*1+1] = ("chdtrc") +ufunc_chdtrc_data[0] = &ufunc_chdtrc_ptr[2*0] +ufunc_chdtrc_data[1] = &ufunc_chdtrc_ptr[2*1] +chdtrc = np.PyUFunc_FromFuncAndData(ufunc_chdtrc_loops, ufunc_chdtrc_data, ufunc_chdtrc_types, 2, 2, 1, 0, "chdtrc", ufunc_chdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtri_loops[2] +cdef void *ufunc_chdtri_ptr[4] +cdef void *ufunc_chdtri_data[2] +cdef char ufunc_chdtri_types[6] +cdef char *ufunc_chdtri_doc = ( + "chdtri(v, p, out=None)\n" + "\n" + "Inverse to `chdtrc` with respect to `x`.\n" + "\n" + "Returns `x` such that ``chdtrc(v, x) == p``.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Degrees of freedom.\n" + "p : array_like\n" + " Probability.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value so that the probability a Chi square random variable\n" + " with `v` degrees of freedom is greater than `x` equals `p`.\n" + "\n" + "See Also\n" + "--------\n" + "chdtrc, chdtr, chdtriv\n" + "\n" + "References\n" + "----------\n" + ".. [1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It inverts `chdtrc`.\n" + "\n" + ">>> v, p = 1, 0.3\n" + ">>> sc.chdtrc(v, sc.chdtri(v, p))\n" + "0.3\n" + ">>> x = 1\n" + ">>> sc.chdtri(v, sc.chdtrc(v, x))\n" + "1.0") +ufunc_chdtri_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtri_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtri_types[0] = NPY_FLOAT +ufunc_chdtri_types[1] = NPY_FLOAT +ufunc_chdtri_types[2] = NPY_FLOAT +ufunc_chdtri_types[3] = NPY_DOUBLE +ufunc_chdtri_types[4] = NPY_DOUBLE +ufunc_chdtri_types[5] = NPY_DOUBLE +ufunc_chdtri_ptr[2*0] = _func_cephes_chdtri +ufunc_chdtri_ptr[2*0+1] = ("chdtri") +ufunc_chdtri_ptr[2*1] = _func_cephes_chdtri +ufunc_chdtri_ptr[2*1+1] = ("chdtri") +ufunc_chdtri_data[0] = &ufunc_chdtri_ptr[2*0] +ufunc_chdtri_data[1] = &ufunc_chdtri_ptr[2*1] +chdtri = np.PyUFunc_FromFuncAndData(ufunc_chdtri_loops, ufunc_chdtri_data, ufunc_chdtri_types, 2, 2, 1, 0, "chdtri", ufunc_chdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtriv_loops[2] +cdef void *ufunc_chdtriv_ptr[4] +cdef void *ufunc_chdtriv_data[2] +cdef char ufunc_chdtriv_types[6] +cdef char *ufunc_chdtriv_doc = ( + "chdtriv(p, x, out=None)\n" + "\n" + "Inverse to `chdtr` with respect to `v`.\n" + "\n" + "Returns `v` such that ``chdtr(v, x) == p``.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability that the Chi square random variable is less than\n" + " or equal to `x`.\n" + "x : array_like\n" + " Nonnegative input.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Degrees of freedom.\n" + "\n" + "See Also\n" + "--------\n" + "chdtr, chdtrc, chdtri\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It inverts `chdtr`.\n" + "\n" + ">>> p, x = 0.5, 1\n" + ">>> sc.chdtr(sc.chdtriv(p, x), x)\n" + "0.5000000000202172\n" + ">>> v = 1\n" + ">>> sc.chdtriv(sc.chdtr(v, x), v)\n" + "1.0000000000000013") +ufunc_chdtriv_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtriv_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtriv_types[0] = NPY_FLOAT +ufunc_chdtriv_types[1] = NPY_FLOAT +ufunc_chdtriv_types[2] = NPY_FLOAT +ufunc_chdtriv_types[3] = NPY_DOUBLE +ufunc_chdtriv_types[4] = NPY_DOUBLE +ufunc_chdtriv_types[5] = NPY_DOUBLE +ufunc_chdtriv_ptr[2*0] = _func_chdtriv +ufunc_chdtriv_ptr[2*0+1] = ("chdtriv") +ufunc_chdtriv_ptr[2*1] = _func_chdtriv +ufunc_chdtriv_ptr[2*1+1] = ("chdtriv") +ufunc_chdtriv_data[0] = &ufunc_chdtriv_ptr[2*0] +ufunc_chdtriv_data[1] = &ufunc_chdtriv_ptr[2*1] +chdtriv = np.PyUFunc_FromFuncAndData(ufunc_chdtriv_loops, ufunc_chdtriv_data, ufunc_chdtriv_types, 2, 2, 1, 0, "chdtriv", ufunc_chdtriv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtr_loops[2] +cdef void *ufunc_chndtr_ptr[4] +cdef void *ufunc_chndtr_data[2] +cdef char ufunc_chndtr_types[8] +cdef char *ufunc_chndtr_doc = ( + "chndtr(x, df, nc, out=None)\n" + "\n" + "Non-central chi square cumulative distribution function\n" + "\n" + "The cumulative distribution function is given by:\n" + "\n" + ".. math::\n" + "\n" + " P(\\chi^{\\prime 2} \\vert \\nu, \\lambda) =\\sum_{j=0}^{\\infty}\n" + " e^{-\\lambda /2}\n" + " \\frac{(\\lambda /2)^j}{j!} P(\\chi^{\\prime 2} \\vert \\nu + 2j),\n" + "\n" + "where :math:`\\nu > 0` is the degrees of freedom (``df``) and\n" + ":math:`\\lambda \\geq 0` is the non-centrality parameter (``nc``).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper bound of the integral; must satisfy ``x >= 0``\n" + "df : array_like\n" + " Degrees of freedom; must satisfy ``df > 0``\n" + "nc : array_like\n" + " Non-centrality parameter; must satisfy ``nc >= 0``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value of the non-central chi square cumulative distribution function.\n" + "\n" + "See Also\n" + "--------\n" + "chndtrix, chndtridf, chndtrinc") +ufunc_chndtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtr_types[0] = NPY_FLOAT +ufunc_chndtr_types[1] = NPY_FLOAT +ufunc_chndtr_types[2] = NPY_FLOAT +ufunc_chndtr_types[3] = NPY_FLOAT +ufunc_chndtr_types[4] = NPY_DOUBLE +ufunc_chndtr_types[5] = NPY_DOUBLE +ufunc_chndtr_types[6] = NPY_DOUBLE +ufunc_chndtr_types[7] = NPY_DOUBLE +ufunc_chndtr_ptr[2*0] = _func_chndtr +ufunc_chndtr_ptr[2*0+1] = ("chndtr") +ufunc_chndtr_ptr[2*1] = _func_chndtr +ufunc_chndtr_ptr[2*1+1] = ("chndtr") +ufunc_chndtr_data[0] = &ufunc_chndtr_ptr[2*0] +ufunc_chndtr_data[1] = &ufunc_chndtr_ptr[2*1] +chndtr = np.PyUFunc_FromFuncAndData(ufunc_chndtr_loops, ufunc_chndtr_data, ufunc_chndtr_types, 2, 3, 1, 0, "chndtr", ufunc_chndtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtridf_loops[2] +cdef void *ufunc_chndtridf_ptr[4] +cdef void *ufunc_chndtridf_data[2] +cdef char ufunc_chndtridf_types[8] +cdef char *ufunc_chndtridf_doc = ( + "chndtridf(x, p, nc, out=None)\n" + "\n" + "Inverse to `chndtr` vs `df`\n" + "\n" + "Calculated using a search to find a value for `df` that produces the\n" + "desired value of `p`.\n" + "\n" + "Parameters\n" + 
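+# A minimal consistency sketch for the `chndtr` ufunc registered above:
+# with non-centrality ``nc = 0`` the distribution reduces mathematically to
+# the central chi square, so `chndtr` should agree with `chdtr`. Note the
+# two take their arguments in different orders; this is an illustration
+# under that assumption, not a guarantee of the CDFLIB behaviour at
+# ``nc = 0``:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import chdtr, chndtr
+#     >>> x, df = 2.5, 3.0
+#     >>> bool(np.isclose(chndtr(x, df, 0.0), chdtr(df, x)))
+#     True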
"----------\n" + "x : array_like\n" + " Upper bound of the integral; must satisfy ``x >= 0``\n" + "p : array_like\n" + " Probability; must satisfy ``0 <= p < 1``\n" + "nc : array_like\n" + " Non-centrality parameter; must satisfy ``nc >= 0``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "df : scalar or ndarray\n" + " Degrees of freedom\n" + "\n" + "See Also\n" + "--------\n" + "chndtr, chndtrix, chndtrinc") +ufunc_chndtridf_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtridf_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtridf_types[0] = NPY_FLOAT +ufunc_chndtridf_types[1] = NPY_FLOAT +ufunc_chndtridf_types[2] = NPY_FLOAT +ufunc_chndtridf_types[3] = NPY_FLOAT +ufunc_chndtridf_types[4] = NPY_DOUBLE +ufunc_chndtridf_types[5] = NPY_DOUBLE +ufunc_chndtridf_types[6] = NPY_DOUBLE +ufunc_chndtridf_types[7] = NPY_DOUBLE +ufunc_chndtridf_ptr[2*0] = _func_chndtridf +ufunc_chndtridf_ptr[2*0+1] = ("chndtridf") +ufunc_chndtridf_ptr[2*1] = _func_chndtridf +ufunc_chndtridf_ptr[2*1+1] = ("chndtridf") +ufunc_chndtridf_data[0] = &ufunc_chndtridf_ptr[2*0] +ufunc_chndtridf_data[1] = &ufunc_chndtridf_ptr[2*1] +chndtridf = np.PyUFunc_FromFuncAndData(ufunc_chndtridf_loops, ufunc_chndtridf_data, ufunc_chndtridf_types, 2, 3, 1, 0, "chndtridf", ufunc_chndtridf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtrinc_loops[2] +cdef void *ufunc_chndtrinc_ptr[4] +cdef void *ufunc_chndtrinc_data[2] +cdef char ufunc_chndtrinc_types[8] +cdef char *ufunc_chndtrinc_doc = ( + "chndtrinc(x, df, p, out=None)\n" + "\n" + "Inverse to `chndtr` vs `nc`\n" + "\n" + "Calculated using a search to find a value for `df` that produces the\n" + "desired value of `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper bound of the integral; must satisfy ``x >= 0``\n" + "df : array_like\n" + " Degrees of freedom; must satisfy ``df > 0``\n" + "p : array_like\n" + " Probability; must satisfy ``0 <= p < 1``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "nc : scalar or ndarray\n" + " Non-centrality\n" + "\n" + "See Also\n" + "--------\n" + "chndtr, chndtrix, chndtrinc") +ufunc_chndtrinc_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtrinc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtrinc_types[0] = NPY_FLOAT +ufunc_chndtrinc_types[1] = NPY_FLOAT +ufunc_chndtrinc_types[2] = NPY_FLOAT +ufunc_chndtrinc_types[3] = NPY_FLOAT +ufunc_chndtrinc_types[4] = NPY_DOUBLE +ufunc_chndtrinc_types[5] = NPY_DOUBLE +ufunc_chndtrinc_types[6] = NPY_DOUBLE +ufunc_chndtrinc_types[7] = NPY_DOUBLE +ufunc_chndtrinc_ptr[2*0] = _func_chndtrinc +ufunc_chndtrinc_ptr[2*0+1] = ("chndtrinc") +ufunc_chndtrinc_ptr[2*1] = _func_chndtrinc +ufunc_chndtrinc_ptr[2*1+1] = ("chndtrinc") +ufunc_chndtrinc_data[0] = &ufunc_chndtrinc_ptr[2*0] +ufunc_chndtrinc_data[1] = &ufunc_chndtrinc_ptr[2*1] +chndtrinc = np.PyUFunc_FromFuncAndData(ufunc_chndtrinc_loops, ufunc_chndtrinc_data, ufunc_chndtrinc_types, 2, 3, 1, 0, "chndtrinc", ufunc_chndtrinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtrix_loops[2] +cdef void *ufunc_chndtrix_ptr[4] +cdef void *ufunc_chndtrix_data[2] +cdef char ufunc_chndtrix_types[8] +cdef char *ufunc_chndtrix_doc = ( + "chndtrix(p, df, nc, out=None)\n" + "\n" + "Inverse to `chndtr` vs `x`\n" + "\n" + "Calculated using a search to find a value for `x` that produces the\n" + "desired value of `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability; must satisfy ``0 <= p 
< 1``\n" + "df : array_like\n" + " Degrees of freedom; must satisfy ``df > 0``\n" + "nc : array_like\n" + " Non-centrality parameter; must satisfy ``nc >= 0``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value so that the probability a non-central Chi square random variable\n" + " with `df` degrees of freedom and non-centrality, `nc`, is less than\n" + " or equal to `x` equals `p`.\n" + "\n" + "See Also\n" + "--------\n" + "chndtr, chndtridf, chndtrinc") +ufunc_chndtrix_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtrix_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtrix_types[0] = NPY_FLOAT +ufunc_chndtrix_types[1] = NPY_FLOAT +ufunc_chndtrix_types[2] = NPY_FLOAT +ufunc_chndtrix_types[3] = NPY_FLOAT +ufunc_chndtrix_types[4] = NPY_DOUBLE +ufunc_chndtrix_types[5] = NPY_DOUBLE +ufunc_chndtrix_types[6] = NPY_DOUBLE +ufunc_chndtrix_types[7] = NPY_DOUBLE +ufunc_chndtrix_ptr[2*0] = _func_chndtrix +ufunc_chndtrix_ptr[2*0+1] = ("chndtrix") +ufunc_chndtrix_ptr[2*1] = _func_chndtrix +ufunc_chndtrix_ptr[2*1+1] = ("chndtrix") +ufunc_chndtrix_data[0] = &ufunc_chndtrix_ptr[2*0] +ufunc_chndtrix_data[1] = &ufunc_chndtrix_ptr[2*1] +chndtrix = np.PyUFunc_FromFuncAndData(ufunc_chndtrix_loops, ufunc_chndtrix_data, ufunc_chndtrix_types, 2, 3, 1, 0, "chndtrix", ufunc_chndtrix_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cosdg_loops[2] +cdef void *ufunc_cosdg_ptr[4] +cdef void *ufunc_cosdg_data[2] +cdef char ufunc_cosdg_types[4] +cdef char *ufunc_cosdg_doc = ( + "cosdg(x, out=None)\n" + "\n" + "Cosine of the angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Cosine of the input.\n" + "\n" + "See Also\n" + "--------\n" + "sindg, tandg, cotdg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using cosine directly.\n" + "\n" + ">>> x = 90 + 180 * np.arange(3)\n" + ">>> sc.cosdg(x)\n" + "array([-0., 0., -0.])\n" + ">>> np.cos(x * np.pi / 180)\n" + "array([ 6.1232340e-17, -1.8369702e-16, 3.0616170e-16])") +ufunc_cosdg_loops[0] = loop_d_d__As_f_f +ufunc_cosdg_loops[1] = loop_d_d__As_d_d +ufunc_cosdg_types[0] = NPY_FLOAT +ufunc_cosdg_types[1] = NPY_FLOAT +ufunc_cosdg_types[2] = NPY_DOUBLE +ufunc_cosdg_types[3] = NPY_DOUBLE +ufunc_cosdg_ptr[2*0] = _func_cephes_cosdg +ufunc_cosdg_ptr[2*0+1] = ("cosdg") +ufunc_cosdg_ptr[2*1] = _func_cephes_cosdg +ufunc_cosdg_ptr[2*1+1] = ("cosdg") +ufunc_cosdg_data[0] = &ufunc_cosdg_ptr[2*0] +ufunc_cosdg_data[1] = &ufunc_cosdg_ptr[2*1] +cosdg = np.PyUFunc_FromFuncAndData(ufunc_cosdg_loops, ufunc_cosdg_data, ufunc_cosdg_types, 2, 1, 1, 0, "cosdg", ufunc_cosdg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cosm1_loops[2] +cdef void *ufunc_cosm1_ptr[4] +cdef void *ufunc_cosm1_data[2] +cdef char ufunc_cosm1_types[4] +cdef char *ufunc_cosm1_doc = ( + "cosm1(x, out=None)\n" + "\n" + "cos(x) - 1 for use when `x` is near zero.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real valued argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of ``cos(x) - 1``.\n" + "\n" + "See Also\n" + "--------\n" + "expm1, log1p\n" + "\n" + "Examples\n" + "--------\n" + ">>> 
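+# A minimal round-trip sketch for the `chndtrix` ufunc registered above,
+# which inverts `chndtr` in ``x`` by search (illustrative values and
+# tolerance):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import chndtr, chndtrix
+#     >>> p, df, nc = 0.7, 3.0, 1.5
+#     >>> x = chndtrix(p, df, nc)
+#     >>> bool(np.isclose(chndtr(x, df, nc), p))
+#     True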
import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than computing ``cos(x) - 1`` directly for\n" + "``x`` around 0.\n" + "\n" + ">>> x = 1e-30\n" + ">>> np.cos(x) - 1\n" + "0.0\n" + ">>> sc.cosm1(x)\n" + "-5.0000000000000005e-61") +ufunc_cosm1_loops[0] = loop_d_d__As_f_f +ufunc_cosm1_loops[1] = loop_d_d__As_d_d +ufunc_cosm1_types[0] = NPY_FLOAT +ufunc_cosm1_types[1] = NPY_FLOAT +ufunc_cosm1_types[2] = NPY_DOUBLE +ufunc_cosm1_types[3] = NPY_DOUBLE +ufunc_cosm1_ptr[2*0] = _func_cephes_cosm1 +ufunc_cosm1_ptr[2*0+1] = ("cosm1") +ufunc_cosm1_ptr[2*1] = _func_cephes_cosm1 +ufunc_cosm1_ptr[2*1+1] = ("cosm1") +ufunc_cosm1_data[0] = &ufunc_cosm1_ptr[2*0] +ufunc_cosm1_data[1] = &ufunc_cosm1_ptr[2*1] +cosm1 = np.PyUFunc_FromFuncAndData(ufunc_cosm1_loops, ufunc_cosm1_data, ufunc_cosm1_types, 2, 1, 1, 0, "cosm1", ufunc_cosm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cotdg_loops[2] +cdef void *ufunc_cotdg_ptr[4] +cdef void *ufunc_cotdg_data[2] +cdef char ufunc_cotdg_types[4] +cdef char *ufunc_cotdg_doc = ( + "cotdg(x, out=None)\n" + "\n" + "Cotangent of the angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Cotangent at the input.\n" + "\n" + "See Also\n" + "--------\n" + "sindg, cosdg, tandg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using cotangent directly.\n" + "\n" + ">>> x = 90 + 180 * np.arange(3)\n" + ">>> sc.cotdg(x)\n" + "array([0., 0., 0.])\n" + ">>> 1 / np.tan(x * np.pi / 180)\n" + "array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])") +ufunc_cotdg_loops[0] = loop_d_d__As_f_f +ufunc_cotdg_loops[1] = loop_d_d__As_d_d +ufunc_cotdg_types[0] = NPY_FLOAT +ufunc_cotdg_types[1] = NPY_FLOAT +ufunc_cotdg_types[2] = NPY_DOUBLE +ufunc_cotdg_types[3] = NPY_DOUBLE +ufunc_cotdg_ptr[2*0] = _func_cephes_cotdg +ufunc_cotdg_ptr[2*0+1] = ("cotdg") +ufunc_cotdg_ptr[2*1] = _func_cephes_cotdg +ufunc_cotdg_ptr[2*1+1] = ("cotdg") +ufunc_cotdg_data[0] = &ufunc_cotdg_ptr[2*0] +ufunc_cotdg_data[1] = &ufunc_cotdg_ptr[2*1] +cotdg = np.PyUFunc_FromFuncAndData(ufunc_cotdg_loops, ufunc_cotdg_data, ufunc_cotdg_types, 2, 1, 1, 0, "cotdg", ufunc_cotdg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_dawsn_loops[4] +cdef void *ufunc_dawsn_ptr[8] +cdef void *ufunc_dawsn_data[4] +cdef char ufunc_dawsn_types[8] +cdef char *ufunc_dawsn_doc = ( + "dawsn(x, out=None)\n" + "\n" + "Dawson's integral.\n" + "\n" + "Computes::\n" + "\n" + " exp(-x**2) * integral(exp(t**2), t=0..x).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Function parameter.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the integral.\n" + "\n" + "See Also\n" + "--------\n" + "wofz, erf, erfc, erfcx, erfi\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. 
Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-15, 15, num=1000)\n" + ">>> plt.plot(x, special.dawsn(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$dawsn(x)$')\n" + ">>> plt.show()") +ufunc_dawsn_loops[0] = loop_d_d__As_f_f +ufunc_dawsn_loops[1] = loop_d_d__As_d_d +ufunc_dawsn_loops[2] = loop_D_D__As_F_F +ufunc_dawsn_loops[3] = loop_D_D__As_D_D +ufunc_dawsn_types[0] = NPY_FLOAT +ufunc_dawsn_types[1] = NPY_FLOAT +ufunc_dawsn_types[2] = NPY_DOUBLE +ufunc_dawsn_types[3] = NPY_DOUBLE +ufunc_dawsn_types[4] = NPY_CFLOAT +ufunc_dawsn_types[5] = NPY_CFLOAT +ufunc_dawsn_types[6] = NPY_CDOUBLE +ufunc_dawsn_types[7] = NPY_CDOUBLE +ufunc_dawsn_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn +ufunc_dawsn_ptr[2*0+1] = ("dawsn") +ufunc_dawsn_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn +ufunc_dawsn_ptr[2*1+1] = ("dawsn") +ufunc_dawsn_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex +ufunc_dawsn_ptr[2*2+1] = ("dawsn") +ufunc_dawsn_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex +ufunc_dawsn_ptr[2*3+1] = ("dawsn") +ufunc_dawsn_data[0] = &ufunc_dawsn_ptr[2*0] +ufunc_dawsn_data[1] = &ufunc_dawsn_ptr[2*1] +ufunc_dawsn_data[2] = &ufunc_dawsn_ptr[2*2] +ufunc_dawsn_data[3] = &ufunc_dawsn_ptr[2*3] +dawsn = np.PyUFunc_FromFuncAndData(ufunc_dawsn_loops, ufunc_dawsn_data, ufunc_dawsn_types, 4, 1, 1, 0, "dawsn", ufunc_dawsn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipe_loops[2] +cdef void *ufunc_ellipe_ptr[4] +cdef void *ufunc_ellipe_data[2] +cdef char ufunc_ellipe_types[4] +cdef char *ufunc_ellipe_doc = ( + "ellipe(m, out=None)\n" + "\n" + "Complete elliptic integral of the second kind\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Defines the parameter of the elliptic integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "E : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellpe`.\n" + "\n" + "For `m > 0` the computation uses the approximation,\n" + "\n" + ".. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),\n" + "\n" + "where :math:`P` and :math:`Q` are tenth-order polynomials. For\n" + "`m < 0`, the relation\n" + "\n" + ".. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)\n" + "\n" + "is used.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [2]_. 
Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre E integral is related to Carlson's symmetric R_D or R_G\n" + "functions in multiple ways [3]_. For example,\n" + "\n" + ".. math:: E(m) = 2 R_G(0, 1-k^2, 1) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [3] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i\n" + "\n" + "Examples\n" + "--------\n" + "This function is used in finding the circumference of an\n" + "ellipse with semi-major axis `a` and semi-minor axis `b`.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + "\n" + ">>> a = 3.5\n" + ">>> b = 2.1\n" + ">>> e_sq = 1.0 - b**2/a**2 # eccentricity squared\n" + "\n" + "Then the circumference is found using the following:\n" + "\n" + ">>> C = 4*a*special.ellipe(e_sq) # circumference formula\n" + ">>> C\n" + "17.868899204378693\n" + "\n" + "When `a` and `b` are the same (meaning eccentricity is 0),\n" + "this reduces to the circumference of a circle.\n" + "\n" + ">>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b\n" + "21.991148575128552\n" + ">>> 2*np.pi*a # formula for circle of radius a\n" + "21.991148575128552") +ufunc_ellipe_loops[0] = loop_d_d__As_f_f +ufunc_ellipe_loops[1] = loop_d_d__As_d_d +ufunc_ellipe_types[0] = NPY_FLOAT +ufunc_ellipe_types[1] = NPY_FLOAT +ufunc_ellipe_types[2] = NPY_DOUBLE +ufunc_ellipe_types[3] = NPY_DOUBLE +ufunc_ellipe_ptr[2*0] = _func_cephes_ellpe +ufunc_ellipe_ptr[2*0+1] = ("ellipe") +ufunc_ellipe_ptr[2*1] = _func_cephes_ellpe +ufunc_ellipe_ptr[2*1+1] = ("ellipe") +ufunc_ellipe_data[0] = &ufunc_ellipe_ptr[2*0] +ufunc_ellipe_data[1] = &ufunc_ellipe_ptr[2*1] +ellipe = np.PyUFunc_FromFuncAndData(ufunc_ellipe_loops, ufunc_ellipe_data, ufunc_ellipe_types, 2, 1, 1, 0, "ellipe", ufunc_ellipe_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipeinc_loops[2] +cdef void *ufunc_ellipeinc_ptr[4] +cdef void *ufunc_ellipeinc_data[2] +cdef char ufunc_ellipeinc_types[6] +cdef char *ufunc_ellipeinc_doc = ( + "ellipeinc(phi, m, out=None)\n" + "\n" + "Incomplete elliptic integral of the second kind\n" + "\n" + "This function is defined as\n" + "\n" + ".. 
math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "phi : array_like\n" + " amplitude of the elliptic integral.\n" + "m : array_like\n" + " parameter of the elliptic integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "E : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellie`.\n" + "\n" + "Computation uses arithmetic-geometric means algorithm.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [2]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre E incomplete integral can be related to combinations\n" + "of Carlson's symmetric integrals R_D, R_F, and R_G in multiple\n" + "ways [3]_. For example, with :math:`c = \\csc^2\\phi`,\n" + "\n" + ".. math::\n" + " E(\\phi, m) = R_F(c-1, c-k^2, c)\n" + " - \\frac{1}{3} k^2 R_D(c-1, c-k^2, c) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [3] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 
19.25(i) https://dlmf.nist.gov/19.25#i") +ufunc_ellipeinc_loops[0] = loop_d_dd__As_ff_f +ufunc_ellipeinc_loops[1] = loop_d_dd__As_dd_d +ufunc_ellipeinc_types[0] = NPY_FLOAT +ufunc_ellipeinc_types[1] = NPY_FLOAT +ufunc_ellipeinc_types[2] = NPY_FLOAT +ufunc_ellipeinc_types[3] = NPY_DOUBLE +ufunc_ellipeinc_types[4] = NPY_DOUBLE +ufunc_ellipeinc_types[5] = NPY_DOUBLE +ufunc_ellipeinc_ptr[2*0] = _func_cephes_ellie +ufunc_ellipeinc_ptr[2*0+1] = ("ellipeinc") +ufunc_ellipeinc_ptr[2*1] = _func_cephes_ellie +ufunc_ellipeinc_ptr[2*1+1] = ("ellipeinc") +ufunc_ellipeinc_data[0] = &ufunc_ellipeinc_ptr[2*0] +ufunc_ellipeinc_data[1] = &ufunc_ellipeinc_ptr[2*1] +ellipeinc = np.PyUFunc_FromFuncAndData(ufunc_ellipeinc_loops, ufunc_ellipeinc_data, ufunc_ellipeinc_types, 2, 2, 1, 0, "ellipeinc", ufunc_ellipeinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipj_loops[2] +cdef void *ufunc_ellipj_ptr[4] +cdef void *ufunc_ellipj_data[2] +cdef char ufunc_ellipj_types[12] +cdef char *ufunc_ellipj_doc = ( + "ellipj(u, m, out=None)\n" + "\n" + "Jacobian elliptic functions\n" + "\n" + "Calculates the Jacobian elliptic functions of parameter `m` between\n" + "0 and 1, and real argument `u`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Parameter.\n" + "u : array_like\n" + " Argument.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function values\n" + "\n" + "Returns\n" + "-------\n" + "sn, cn, dn, ph : 4-tuple of scalar or ndarray\n" + " The returned functions::\n" + "\n" + " sn(u|m), cn(u|m), dn(u|m)\n" + "\n" + " The value `ph` is such that if `u = ellipkinc(ph, m)`,\n" + " then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.\n" + "\n" + "See Also\n" + "--------\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellpj`.\n" + "\n" + "These functions are periodic, with quarter-period on the real axis\n" + "equal to the complete elliptic integral `ellipk(m)`.\n" + "\n" + "Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then\n" + "`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called\n" + "the amplitude of `u`.\n" + "\n" + "Computation is by means of the arithmetic-geometric mean algorithm,\n" + "except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`\n" + "close to 1, the approximation applies only for `phi < pi/2`.\n" + "\n" + "References\n" + "----------\n" + ".. 
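+# A minimal identity sketch for the `ellipj` ufunc registered above: the
+# outputs satisfy ``sn**2 + cn**2 == 1`` and, per the docstring,
+# ``sn == sin(ph)`` (illustrative values of ``u`` and ``m``):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipj
+#     >>> sn, cn, dn, ph = ellipj(0.8, 0.5)
+#     >>> bool(np.isclose(sn**2 + cn**2, 1.0))
+#     True
+#     >>> bool(np.isclose(sn, np.sin(ph)))
+#     True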
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_ellipj_loops[0] = loop_i_dd_dddd_As_ff_ffff +ufunc_ellipj_loops[1] = loop_i_dd_dddd_As_dd_dddd +ufunc_ellipj_types[0] = NPY_FLOAT +ufunc_ellipj_types[1] = NPY_FLOAT +ufunc_ellipj_types[2] = NPY_FLOAT +ufunc_ellipj_types[3] = NPY_FLOAT +ufunc_ellipj_types[4] = NPY_FLOAT +ufunc_ellipj_types[5] = NPY_FLOAT +ufunc_ellipj_types[6] = NPY_DOUBLE +ufunc_ellipj_types[7] = NPY_DOUBLE +ufunc_ellipj_types[8] = NPY_DOUBLE +ufunc_ellipj_types[9] = NPY_DOUBLE +ufunc_ellipj_types[10] = NPY_DOUBLE +ufunc_ellipj_types[11] = NPY_DOUBLE +ufunc_ellipj_ptr[2*0] = _func_cephes_ellpj_wrap +ufunc_ellipj_ptr[2*0+1] = ("ellipj") +ufunc_ellipj_ptr[2*1] = _func_cephes_ellpj_wrap +ufunc_ellipj_ptr[2*1+1] = ("ellipj") +ufunc_ellipj_data[0] = &ufunc_ellipj_ptr[2*0] +ufunc_ellipj_data[1] = &ufunc_ellipj_ptr[2*1] +ellipj = np.PyUFunc_FromFuncAndData(ufunc_ellipj_loops, ufunc_ellipj_data, ufunc_ellipj_types, 2, 2, 4, 0, "ellipj", ufunc_ellipj_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipk_loops[2] +cdef void *ufunc_ellipk_ptr[4] +cdef void *ufunc_ellipk_data[2] +cdef char ufunc_ellipk_types[4] +cdef char *ufunc_ellipk_doc = ( + "ellipk(m, out=None)\n" + "\n" + "Complete elliptic integral of the first kind.\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " The parameter of the elliptic integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind around m = 1\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "Notes\n" + "-----\n" + "For more precision around point m = 1, use `ellipkm1`, which this\n" + "function calls.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [1]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre K integral is related to Carlson's symmetric R_F\n" + "function by [2]_:\n" + "\n" + ".. math:: K(m) = R_F(0, 1-k^2, 1) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [2] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 
19.25(i) https://dlmf.nist.gov/19.25#i") +ufunc_ellipk_loops[0] = loop_d_d__As_f_f +ufunc_ellipk_loops[1] = loop_d_d__As_d_d +ufunc_ellipk_types[0] = NPY_FLOAT +ufunc_ellipk_types[1] = NPY_FLOAT +ufunc_ellipk_types[2] = NPY_DOUBLE +ufunc_ellipk_types[3] = NPY_DOUBLE +ufunc_ellipk_ptr[2*0] = _func_special_ellipk +ufunc_ellipk_ptr[2*0+1] = ("ellipk") +ufunc_ellipk_ptr[2*1] = _func_special_ellipk +ufunc_ellipk_ptr[2*1+1] = ("ellipk") +ufunc_ellipk_data[0] = &ufunc_ellipk_ptr[2*0] +ufunc_ellipk_data[1] = &ufunc_ellipk_ptr[2*1] +ellipk = np.PyUFunc_FromFuncAndData(ufunc_ellipk_loops, ufunc_ellipk_data, ufunc_ellipk_types, 2, 1, 1, 0, "ellipk", ufunc_ellipk_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipkinc_loops[2] +cdef void *ufunc_ellipkinc_ptr[4] +cdef void *ufunc_ellipkinc_data[2] +cdef char ufunc_ellipkinc_types[6] +cdef char *ufunc_ellipkinc_doc = ( + "ellipkinc(phi, m, out=None)\n" + "\n" + "Incomplete elliptic integral of the first kind\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt\n" + "\n" + "This function is also called :math:`F(\\phi, m)`.\n" + "\n" + "Parameters\n" + "----------\n" + "phi : array_like\n" + " amplitude of the elliptic integral\n" + "m : array_like\n" + " parameter of the elliptic integral\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the elliptic integral\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellik`. The computation is\n" + "carried out using the arithmetic-geometric mean algorithm.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [2]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre K incomplete integral (or F integral) is related to\n" + "Carlson's symmetric R_F function [3]_.\n" + "Setting :math:`c = \\csc^2\\phi`,\n" + "\n" + ".. math:: F(\\phi, m) = R_F(c-1, c-k^2, c) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [3] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 
19.25(i) https://dlmf.nist.gov/19.25#i") +ufunc_ellipkinc_loops[0] = loop_d_dd__As_ff_f +ufunc_ellipkinc_loops[1] = loop_d_dd__As_dd_d +ufunc_ellipkinc_types[0] = NPY_FLOAT +ufunc_ellipkinc_types[1] = NPY_FLOAT +ufunc_ellipkinc_types[2] = NPY_FLOAT +ufunc_ellipkinc_types[3] = NPY_DOUBLE +ufunc_ellipkinc_types[4] = NPY_DOUBLE +ufunc_ellipkinc_types[5] = NPY_DOUBLE +ufunc_ellipkinc_ptr[2*0] = _func_cephes_ellik +ufunc_ellipkinc_ptr[2*0+1] = ("ellipkinc") +ufunc_ellipkinc_ptr[2*1] = _func_cephes_ellik +ufunc_ellipkinc_ptr[2*1+1] = ("ellipkinc") +ufunc_ellipkinc_data[0] = &ufunc_ellipkinc_ptr[2*0] +ufunc_ellipkinc_data[1] = &ufunc_ellipkinc_ptr[2*1] +ellipkinc = np.PyUFunc_FromFuncAndData(ufunc_ellipkinc_loops, ufunc_ellipkinc_data, ufunc_ellipkinc_types, 2, 2, 1, 0, "ellipkinc", ufunc_ellipkinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipkm1_loops[2] +cdef void *ufunc_ellipkm1_ptr[4] +cdef void *ufunc_ellipkm1_data[2] +cdef char ufunc_ellipkm1_types[4] +cdef char *ufunc_ellipkm1_doc = ( + "ellipkm1(p, out=None)\n" + "\n" + "Complete elliptic integral of the first kind around `m` = 1\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt\n" + "\n" + "where `m = 1 - p`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Defines the parameter of the elliptic integral as `m = 1 - p`.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellpk`.\n" + "\n" + "For `p <= 1`, computation uses the approximation,\n" + "\n" + ".. math:: K(p) \\approx P(p) - \\log(p) Q(p),\n" + "\n" + "where :math:`P` and :math:`Q` are tenth-order polynomials. The\n" + "argument `p` is used internally rather than `m` so that the logarithmic\n" + "singularity at `m = 1` will be shifted to the origin; this preserves\n" + "maximum accuracy. For `p > 1`, the identity\n" + "\n" + ".. math:: K(p) = K(1/p)/\\sqrt{p}\n" + "\n" + "is used.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_ellipkm1_loops[0] = loop_d_d__As_f_f +ufunc_ellipkm1_loops[1] = loop_d_d__As_d_d +ufunc_ellipkm1_types[0] = NPY_FLOAT +ufunc_ellipkm1_types[1] = NPY_FLOAT +ufunc_ellipkm1_types[2] = NPY_DOUBLE +ufunc_ellipkm1_types[3] = NPY_DOUBLE +ufunc_ellipkm1_ptr[2*0] = _func_cephes_ellpk +ufunc_ellipkm1_ptr[2*0+1] = ("ellipkm1") +ufunc_ellipkm1_ptr[2*1] = _func_cephes_ellpk +ufunc_ellipkm1_ptr[2*1+1] = ("ellipkm1") +ufunc_ellipkm1_data[0] = &ufunc_ellipkm1_ptr[2*0] +ufunc_ellipkm1_data[1] = &ufunc_ellipkm1_ptr[2*1] +ellipkm1 = np.PyUFunc_FromFuncAndData(ufunc_ellipkm1_loops, ufunc_ellipkm1_data, ufunc_ellipkm1_types, 2, 1, 1, 0, "ellipkm1", ufunc_ellipkm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprc_loops[4] +cdef void *ufunc_elliprc_ptr[8] +cdef void *ufunc_elliprc_data[4] +cdef char ufunc_elliprc_types[12] +cdef char *ufunc_elliprc_doc = ( + "elliprc(x, y, out=None)\n" + "\n" + "Degenerate symmetric elliptic integral.\n" + "\n" + "The function RC is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{C}}(x, y) =\n" + " \\frac{1}{2} \\int_0^{+\\infty} (t + x)^{-1/2} (t + y)^{-1} dt\n" + " = R_{\\mathrm{F}}(x, y, y)\n" + "\n" + "Parameters\n" + "----------\n" + "x, y : array_like\n" + " Real or complex input parameters. `x` can be any number in the\n" + " complex plane cut along the negative real axis. `y` must be non-zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If `y` is real and negative, the Cauchy\n" + " principal value is returned. If both of `x` and `y` are real, the\n" + " return value is real. Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) ==\n" + "elliprf(x, y, y)``. It is an elementary function rather than an elliptic\n" + "integral.\n" + "\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order. [2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E6\n" + ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 
13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprc\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprc(scale*x, scale*y)\n" + "(0.5484493976710874-0.4169557678995833j)\n" + "\n" + ">>> elliprc(x, y)/np.sqrt(scale)\n" + "(0.5484493976710874-0.41695576789958333j)\n" + "\n" + "When the two arguments coincide, the integral is particularly\n" + "simple:\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> elliprc(x, x)\n" + "(0.4299173120614631-0.3041729818745595j)\n" + "\n" + ">>> 1/np.sqrt(x)\n" + "(0.4299173120614631-0.30417298187455954j)\n" + "\n" + "Another simple case: the first argument vanishes:\n" + "\n" + ">>> y = 1.2 + 3.4j\n" + ">>> elliprc(0, y)\n" + "(0.6753125346116815-0.47779380263880866j)\n" + "\n" + ">>> np.pi/2/np.sqrt(y)\n" + "(0.6753125346116815-0.4777938026388088j)\n" + "\n" + "When `x` and `y` are both positive, we can express\n" + ":math:`R_C(x,y)` in terms of more elementary functions. For the\n" + "case :math:`0 \\le x < y`,\n" + "\n" + ">>> x = 3.2\n" + ">>> y = 6.\n" + ">>> elliprc(x, y)\n" + "0.44942991498453444\n" + "\n" + ">>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x)\n" + "0.44942991498453433\n" + "\n" + "And for the case :math:`0 \\le y < x`,\n" + "\n" + ">>> x = 6.\n" + ">>> y = 3.2\n" + ">>> elliprc(x,y)\n" + "0.4989837501576147\n" + "\n" + ">>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y)\n" + "0.49898375015761476") +ufunc_elliprc_loops[0] = loop_d_dd__As_ff_f +ufunc_elliprc_loops[1] = loop_d_dd__As_dd_d +ufunc_elliprc_loops[2] = loop_D_DD__As_FF_F +ufunc_elliprc_loops[3] = loop_D_DD__As_DD_D +ufunc_elliprc_types[0] = NPY_FLOAT +ufunc_elliprc_types[1] = NPY_FLOAT +ufunc_elliprc_types[2] = NPY_FLOAT +ufunc_elliprc_types[3] = NPY_DOUBLE +ufunc_elliprc_types[4] = NPY_DOUBLE +ufunc_elliprc_types[5] = NPY_DOUBLE +ufunc_elliprc_types[6] = NPY_CFLOAT +ufunc_elliprc_types[7] = NPY_CFLOAT +ufunc_elliprc_types[8] = NPY_CFLOAT +ufunc_elliprc_types[9] = NPY_CDOUBLE +ufunc_elliprc_types[10] = NPY_CDOUBLE +ufunc_elliprc_types[11] = NPY_CDOUBLE +ufunc_elliprc_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RC +ufunc_elliprc_ptr[2*0+1] = ("elliprc") +ufunc_elliprc_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RC +ufunc_elliprc_ptr[2*1+1] = ("elliprc") +ufunc_elliprc_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RC +ufunc_elliprc_ptr[2*2+1] = ("elliprc") +ufunc_elliprc_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RC +ufunc_elliprc_ptr[2*3+1] = ("elliprc") +ufunc_elliprc_data[0] = &ufunc_elliprc_ptr[2*0] +ufunc_elliprc_data[1] = &ufunc_elliprc_ptr[2*1] +ufunc_elliprc_data[2] = &ufunc_elliprc_ptr[2*2] +ufunc_elliprc_data[3] = &ufunc_elliprc_ptr[2*3] +elliprc = np.PyUFunc_FromFuncAndData(ufunc_elliprc_loops, ufunc_elliprc_data, ufunc_elliprc_types, 4, 2, 1, 0, "elliprc", ufunc_elliprc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprd_loops[4] +cdef void *ufunc_elliprd_ptr[8] +cdef void *ufunc_elliprd_data[4] +cdef char ufunc_elliprd_types[16] +cdef char *ufunc_elliprd_doc = ( + "elliprd(x, y, z, out=None)\n" + "\n" + "Symmetric elliptic integral of the second kind.\n" + "\n" + "The function RD is defined as [1]_\n" + "\n" + ".. 
math::\n" + "\n" + " R_{\\mathrm{D}}(x, y, z) =\n" + " \\frac{3}{2} \\int_0^{+\\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2}\n" + " dt\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z : array_like\n" + " Real or complex input parameters. `x` or `y` can be any number in the\n" + " complex plane cut along the negative real axis, but at most one of them\n" + " can be zero, while `z` must be non-zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, and `z` are real, the\n" + " return value is real. Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric elliptic integral.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) ==\n" + "elliprj(x, y, z, z)``.\n" + "\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order. [2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E5\n" + ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprd\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprd(scale*x, scale*y, scale*z)\n" + "(-0.03703043835680379-0.24500934665683802j)\n" + "\n" + ">>> elliprd(x, y, z)*np.power(scale, -1.5)\n" + "(-0.0370304383568038-0.24500934665683805j)\n" + "\n" + "All three arguments coincide:\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> elliprd(x, x, x)\n" + "(-0.03986825876151896-0.14051741840449586j)\n" + "\n" + ">>> np.power(x, -1.5)\n" + "(-0.03986825876151894-0.14051741840449583j)\n" + "\n" + "The so-called \"second lemniscate constant\":\n" + "\n" + ">>> elliprd(0, 2, 1)/3\n" + "0.5990701173677961\n" + "\n" + ">>> from scipy.special import gamma\n" + ">>> gamma(0.75)**2/np.sqrt(2*np.pi)\n" + "0.5990701173677959") +ufunc_elliprd_loops[0] = loop_d_ddd__As_fff_f +ufunc_elliprd_loops[1] = loop_d_ddd__As_ddd_d +ufunc_elliprd_loops[2] = loop_D_DDD__As_FFF_F +ufunc_elliprd_loops[3] = loop_D_DDD__As_DDD_D +ufunc_elliprd_types[0] = NPY_FLOAT +ufunc_elliprd_types[1] = NPY_FLOAT +ufunc_elliprd_types[2] = NPY_FLOAT +ufunc_elliprd_types[3] = NPY_FLOAT +ufunc_elliprd_types[4] = NPY_DOUBLE +ufunc_elliprd_types[5] = NPY_DOUBLE +ufunc_elliprd_types[6] = NPY_DOUBLE +ufunc_elliprd_types[7] = NPY_DOUBLE +ufunc_elliprd_types[8] = NPY_CFLOAT +ufunc_elliprd_types[9] = NPY_CFLOAT +ufunc_elliprd_types[10] = NPY_CFLOAT +ufunc_elliprd_types[11] = NPY_CFLOAT +ufunc_elliprd_types[12] = NPY_CDOUBLE +ufunc_elliprd_types[13] = NPY_CDOUBLE +ufunc_elliprd_types[14] = NPY_CDOUBLE +ufunc_elliprd_types[15] = NPY_CDOUBLE +ufunc_elliprd_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RD 
+ufunc_elliprd_ptr[2*0+1] = ("elliprd") +ufunc_elliprd_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RD +ufunc_elliprd_ptr[2*1+1] = ("elliprd") +ufunc_elliprd_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RD +ufunc_elliprd_ptr[2*2+1] = ("elliprd") +ufunc_elliprd_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RD +ufunc_elliprd_ptr[2*3+1] = ("elliprd") +ufunc_elliprd_data[0] = &ufunc_elliprd_ptr[2*0] +ufunc_elliprd_data[1] = &ufunc_elliprd_ptr[2*1] +ufunc_elliprd_data[2] = &ufunc_elliprd_ptr[2*2] +ufunc_elliprd_data[3] = &ufunc_elliprd_ptr[2*3] +elliprd = np.PyUFunc_FromFuncAndData(ufunc_elliprd_loops, ufunc_elliprd_data, ufunc_elliprd_types, 4, 3, 1, 0, "elliprd", ufunc_elliprd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprf_loops[4] +cdef void *ufunc_elliprf_ptr[8] +cdef void *ufunc_elliprf_data[4] +cdef char ufunc_elliprf_types[16] +cdef char *ufunc_elliprf_doc = ( + "elliprf(x, y, z, out=None)\n" + "\n" + "Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "The function RF is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{F}}(x, y, z) =\n" + " \\frac{1}{2} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z : array_like\n" + " Real or complex input parameters. `x`, `y`, or `z` can be any number in\n" + " the complex plane cut along the negative real axis, but at most one of\n" + " them can be zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, and `z` are real, the return\n" + " value is real. Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric integral.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order (cf.:\n" + "https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete\n" + "integral. [2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E1\n" + ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 
13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprf\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprf(scale*x, scale*y, scale*z)\n" + "(0.5328051227278146-0.4008623567957094j)\n" + "\n" + ">>> elliprf(x, y, z)/np.sqrt(scale)\n" + "(0.5328051227278147-0.4008623567957095j)\n" + "\n" + "All three arguments coincide:\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> elliprf(x, x, x)\n" + "(0.42991731206146316-0.30417298187455954j)\n" + "\n" + ">>> 1/np.sqrt(x)\n" + "(0.4299173120614631-0.30417298187455954j)\n" + "\n" + "The so-called \"first lemniscate constant\":\n" + "\n" + ">>> elliprf(0, 1, 2)\n" + "1.3110287771460598\n" + "\n" + ">>> from scipy.special import gamma\n" + ">>> gamma(0.25)**2/(4*np.sqrt(2*np.pi))\n" + "1.3110287771460598") +ufunc_elliprf_loops[0] = loop_d_ddd__As_fff_f +ufunc_elliprf_loops[1] = loop_d_ddd__As_ddd_d +ufunc_elliprf_loops[2] = loop_D_DDD__As_FFF_F +ufunc_elliprf_loops[3] = loop_D_DDD__As_DDD_D +ufunc_elliprf_types[0] = NPY_FLOAT +ufunc_elliprf_types[1] = NPY_FLOAT +ufunc_elliprf_types[2] = NPY_FLOAT +ufunc_elliprf_types[3] = NPY_FLOAT +ufunc_elliprf_types[4] = NPY_DOUBLE +ufunc_elliprf_types[5] = NPY_DOUBLE +ufunc_elliprf_types[6] = NPY_DOUBLE +ufunc_elliprf_types[7] = NPY_DOUBLE +ufunc_elliprf_types[8] = NPY_CFLOAT +ufunc_elliprf_types[9] = NPY_CFLOAT +ufunc_elliprf_types[10] = NPY_CFLOAT +ufunc_elliprf_types[11] = NPY_CFLOAT +ufunc_elliprf_types[12] = NPY_CDOUBLE +ufunc_elliprf_types[13] = NPY_CDOUBLE +ufunc_elliprf_types[14] = NPY_CDOUBLE +ufunc_elliprf_types[15] = NPY_CDOUBLE +ufunc_elliprf_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RF +ufunc_elliprf_ptr[2*0+1] = ("elliprf") +ufunc_elliprf_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RF +ufunc_elliprf_ptr[2*1+1] = ("elliprf") +ufunc_elliprf_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RF +ufunc_elliprf_ptr[2*2+1] = ("elliprf") +ufunc_elliprf_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RF +ufunc_elliprf_ptr[2*3+1] = ("elliprf") +ufunc_elliprf_data[0] = &ufunc_elliprf_ptr[2*0] +ufunc_elliprf_data[1] = &ufunc_elliprf_ptr[2*1] +ufunc_elliprf_data[2] = &ufunc_elliprf_ptr[2*2] +ufunc_elliprf_data[3] = &ufunc_elliprf_ptr[2*3] +elliprf = np.PyUFunc_FromFuncAndData(ufunc_elliprf_loops, ufunc_elliprf_data, ufunc_elliprf_types, 4, 3, 1, 0, "elliprf", ufunc_elliprf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprg_loops[4] +cdef void *ufunc_elliprg_ptr[8] +cdef void *ufunc_elliprg_data[4] +cdef char ufunc_elliprg_types[16] +cdef char *ufunc_elliprg_doc = ( + "elliprg(x, y, z, out=None)\n" + "\n" + "Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "The function RG is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{G}}(x, y, z) =\n" + " \\frac{1}{4} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2}\n" + " \\left(\\frac{x}{t + x} + \\frac{y}{t + y} + \\frac{z}{t + z}\\right) t\n" + " dt\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z : array_like\n" + " Real or complex input parameters. `x`, `y`, or `z` can be any number in\n" + " the complex plane cut along the negative real axis.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. 
If all of `x`, `y`, and `z` are real, the return\n" + " value is real. Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric integral.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "The implementation uses the relation [1]_\n" + "\n" + ".. math::\n" + "\n" + " 2 R_{\\mathrm{G}}(x, y, z) =\n" + " z R_{\\mathrm{F}}(x, y, z) -\n" + " \\frac{1}{3} (x - z) (y - z) R_{\\mathrm{D}}(x, y, z) +\n" + " \\sqrt{\\frac{x y}{z}}\n" + "\n" + "and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can\n" + "be chosen as the pivot. When one of the arguments is close to zero, the AGM\n" + "method is applied instead. Other special cases are computed following Ref.\n" + "[2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + ".. [2] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E1\n" + " https://dlmf.nist.gov/19.20.ii\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprg\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprg(scale*x, scale*y, scale*z)\n" + "(1.195936862005246+0.8470988320464167j)\n" + "\n" + ">>> elliprg(x, y, z)*np.sqrt(scale)\n" + "(1.195936862005246+0.8470988320464165j)\n" + "\n" + "Simplifications:\n" + "\n" + ">>> elliprg(0, y, y)\n" + "1.756203682760182\n" + "\n" + ">>> 0.25*np.pi*np.sqrt(y)\n" + "1.7562036827601817\n" + "\n" + ">>> elliprg(0, 0, z)\n" + "1.224744871391589\n" + "\n" + ">>> 0.5*np.sqrt(z)\n" + "1.224744871391589\n" + "\n" + "The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and\n" + "``c`` is given by\n" + "\n" + ".. math::\n" + "\n" + " S = 4 \\pi a b c R_{\\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2).\n" + "\n" + ">>> def ellipsoid_area(a, b, c):\n" + "... r = 4.0 * np.pi * a * b * c\n" + "... 
return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c))\n" + ">>> print(ellipsoid_area(1, 3, 5))\n" + "108.62688289491807") +ufunc_elliprg_loops[0] = loop_d_ddd__As_fff_f +ufunc_elliprg_loops[1] = loop_d_ddd__As_ddd_d +ufunc_elliprg_loops[2] = loop_D_DDD__As_FFF_F +ufunc_elliprg_loops[3] = loop_D_DDD__As_DDD_D +ufunc_elliprg_types[0] = NPY_FLOAT +ufunc_elliprg_types[1] = NPY_FLOAT +ufunc_elliprg_types[2] = NPY_FLOAT +ufunc_elliprg_types[3] = NPY_FLOAT +ufunc_elliprg_types[4] = NPY_DOUBLE +ufunc_elliprg_types[5] = NPY_DOUBLE +ufunc_elliprg_types[6] = NPY_DOUBLE +ufunc_elliprg_types[7] = NPY_DOUBLE +ufunc_elliprg_types[8] = NPY_CFLOAT +ufunc_elliprg_types[9] = NPY_CFLOAT +ufunc_elliprg_types[10] = NPY_CFLOAT +ufunc_elliprg_types[11] = NPY_CFLOAT +ufunc_elliprg_types[12] = NPY_CDOUBLE +ufunc_elliprg_types[13] = NPY_CDOUBLE +ufunc_elliprg_types[14] = NPY_CDOUBLE +ufunc_elliprg_types[15] = NPY_CDOUBLE +ufunc_elliprg_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RG +ufunc_elliprg_ptr[2*0+1] = ("elliprg") +ufunc_elliprg_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RG +ufunc_elliprg_ptr[2*1+1] = ("elliprg") +ufunc_elliprg_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RG +ufunc_elliprg_ptr[2*2+1] = ("elliprg") +ufunc_elliprg_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RG +ufunc_elliprg_ptr[2*3+1] = ("elliprg") +ufunc_elliprg_data[0] = &ufunc_elliprg_ptr[2*0] +ufunc_elliprg_data[1] = &ufunc_elliprg_ptr[2*1] +ufunc_elliprg_data[2] = &ufunc_elliprg_ptr[2*2] +ufunc_elliprg_data[3] = &ufunc_elliprg_ptr[2*3] +elliprg = np.PyUFunc_FromFuncAndData(ufunc_elliprg_loops, ufunc_elliprg_data, ufunc_elliprg_types, 4, 3, 1, 0, "elliprg", ufunc_elliprg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprj_loops[4] +cdef void *ufunc_elliprj_ptr[8] +cdef void *ufunc_elliprj_data[4] +cdef char ufunc_elliprj_types[20] +cdef char *ufunc_elliprj_doc = ( + "elliprj(x, y, z, p, out=None)\n" + "\n" + "Symmetric elliptic integral of the third kind.\n" + "\n" + "The function RJ is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{J}}(x, y, z, p) =\n" + " \\frac{3}{2} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2}\n" + " (t + p)^{-1} dt\n" + "\n" + ".. warning::\n" + " This function should be considered experimental when the inputs are\n" + " unbalanced. Check correctness with another independent implementation.\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z, p : array_like\n" + " Real or complex input parameters. `x`, `y`, or `z` are numbers in\n" + " the complex plane cut along the negative real axis (subject to further\n" + " constraints, see Notes), and at most one of them can be zero. `p` must\n" + " be non-zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the\n" + " return value is real. Otherwise, the return value is complex.\n" + "\n" + " If `p` is real and negative, while `x`, `y`, and `z` are real,\n" + " non-negative, and at most one of them is zero, the Cauchy principal\n" + " value is returned. 
[1]_ [2]_\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric integral.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order. [3]_ The algorithm is slightly\n" + "different from its earlier incarnation as it appears in [1]_, in that the\n" + "call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in\n" + "the inner loop. Asymptotic approximations are used where arguments differ\n" + "widely in order of magnitude. [5]_\n" + "\n" + "The input values are subject to certain sufficient but not necessary\n" + "constraints when input arguments are complex. Notably, ``x``, ``y``, and\n" + "``z`` must have non-negative real parts, unless two of them are\n" + "non-negative and complex conjugates of each other while the other is a real\n" + "non-negative number. [1]_ If the inputs do not satisfy the sufficient\n" + "condition described in Ref. [1]_ they are rejected outright with the output\n" + "set to NaN.\n" + "\n" + "In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the\n" + "function ``elliprd`` should be preferred because of its less restrictive\n" + "domain.\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + ".. [2] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.20.iii\n" + ".. [3] B. C. Carlson, J. FitzSimmons, \"Reduction Theorems for Elliptic\n" + " Integrands with the Square Root of Two Quadratic Factors,\" J.\n" + " Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000.\n" + " https://doi.org/10.1016/S0377-0427(00)00282-X\n" + ".. [4] F. Johansson, \"Numerical Evaluation of Elliptic Functions, Elliptic\n" + " Integrals and Modular Forms,\" in J. Blumlein, C. Schneider, P.\n" + " Paule, eds., \"Elliptic Integrals, Elliptic Functions and Modular\n" + " Forms in Quantum Field Theory,\" pp. 269-293, 2019 (Cham,\n" + " Switzerland: Springer Nature Switzerland)\n" + " https://arxiv.org/abs/1806.06725\n" + " https://doi.org/10.1007/978-3-030-04480-0\n" + ".. [5] B. C. Carlson, J. L. Gustafson, \"Asymptotic Approximations for\n" + " Symmetric Elliptic Integrals,\" SIAM J. Math. Anal., vol. 25, no. 2,\n" + " pp. 
288-303, 1994.\n" + " https://arxiv.org/abs/math/9310223\n" + " https://doi.org/10.1137/S0036141092228477\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprj\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> p = 7.\n" + ">>> scale = 0.3 - 0.4j\n" + ">>> elliprj(scale*x, scale*y, scale*z, scale*p)\n" + "(0.10834905565679157+0.19694950747103812j)\n" + "\n" + ">>> elliprj(x, y, z, p)*np.power(scale, -1.5)\n" + "(0.10834905565679556+0.19694950747103854j)\n" + "\n" + "Reduction to simpler elliptic integral:\n" + "\n" + ">>> elliprj(x, y, z, z)\n" + "(0.08288462362195129-0.028376809745123258j)\n" + "\n" + ">>> from scipy.special import elliprd\n" + ">>> elliprd(x, y, z)\n" + "(0.08288462362195136-0.028376809745123296j)\n" + "\n" + "All arguments coincide:\n" + "\n" + ">>> elliprj(x, x, x, x)\n" + "(-0.03986825876151896-0.14051741840449586j)\n" + "\n" + ">>> np.power(x, -1.5)\n" + "(-0.03986825876151894-0.14051741840449583j)") +ufunc_elliprj_loops[0] = loop_d_dddd__As_ffff_f +ufunc_elliprj_loops[1] = loop_d_dddd__As_dddd_d +ufunc_elliprj_loops[2] = loop_D_DDDD__As_FFFF_F +ufunc_elliprj_loops[3] = loop_D_DDDD__As_DDDD_D +ufunc_elliprj_types[0] = NPY_FLOAT +ufunc_elliprj_types[1] = NPY_FLOAT +ufunc_elliprj_types[2] = NPY_FLOAT +ufunc_elliprj_types[3] = NPY_FLOAT +ufunc_elliprj_types[4] = NPY_FLOAT +ufunc_elliprj_types[5] = NPY_DOUBLE +ufunc_elliprj_types[6] = NPY_DOUBLE +ufunc_elliprj_types[7] = NPY_DOUBLE +ufunc_elliprj_types[8] = NPY_DOUBLE +ufunc_elliprj_types[9] = NPY_DOUBLE +ufunc_elliprj_types[10] = NPY_CFLOAT +ufunc_elliprj_types[11] = NPY_CFLOAT +ufunc_elliprj_types[12] = NPY_CFLOAT +ufunc_elliprj_types[13] = NPY_CFLOAT +ufunc_elliprj_types[14] = NPY_CFLOAT +ufunc_elliprj_types[15] = NPY_CDOUBLE +ufunc_elliprj_types[16] = NPY_CDOUBLE +ufunc_elliprj_types[17] = NPY_CDOUBLE +ufunc_elliprj_types[18] = NPY_CDOUBLE +ufunc_elliprj_types[19] = NPY_CDOUBLE +ufunc_elliprj_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RJ +ufunc_elliprj_ptr[2*0+1] = ("elliprj") +ufunc_elliprj_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RJ +ufunc_elliprj_ptr[2*1+1] = ("elliprj") +ufunc_elliprj_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RJ +ufunc_elliprj_ptr[2*2+1] = ("elliprj") +ufunc_elliprj_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RJ +ufunc_elliprj_ptr[2*3+1] = ("elliprj") +ufunc_elliprj_data[0] = &ufunc_elliprj_ptr[2*0] +ufunc_elliprj_data[1] = &ufunc_elliprj_ptr[2*1] +ufunc_elliprj_data[2] = &ufunc_elliprj_ptr[2*2] +ufunc_elliprj_data[3] = &ufunc_elliprj_ptr[2*3] +elliprj = np.PyUFunc_FromFuncAndData(ufunc_elliprj_loops, ufunc_elliprj_data, ufunc_elliprj_types, 4, 4, 1, 0, "elliprj", ufunc_elliprj_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_entr_loops[2] +cdef void *ufunc_entr_ptr[4] +cdef void *ufunc_entr_data[2] +cdef char ufunc_entr_types[4] +cdef char *ufunc_entr_doc = ( + "entr(x, out=None)\n" + "\n" + "Elementwise function for computing entropy.\n" + "\n" + ".. 
math:: \\text{entr}(x) = \\begin{cases} - x \\log(x) & x > 0 \\\\ 0 & x = 0\n" + " \\\\ -\\infty & \\text{otherwise} \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "x : ndarray\n" + " Input array.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "res : scalar or ndarray\n" + " The value of the elementwise entropy function at the given points `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kl_div, rel_entr, scipy.stats.entropy\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.15.0\n" + "\n" + "This function is concave.\n" + "\n" + "The origin of this function is in convex programming; see [1]_.\n" + "Given a probability distribution :math:`p_1, \\ldots, p_n`,\n" + "the definition of entropy in the context of *information theory* is\n" + "\n" + ".. math::\n" + "\n" + " \\sum_{i = 1}^n \\mathrm{entr}(p_i).\n" + "\n" + "To compute the latter quantity, use `scipy.stats.entropy`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n" + " Cambridge University Press, 2004.\n" + " :doi:`10.1017/CBO9780511804441`") +ufunc_entr_loops[0] = loop_d_d__As_f_f +ufunc_entr_loops[1] = loop_d_d__As_d_d +ufunc_entr_types[0] = NPY_FLOAT +ufunc_entr_types[1] = NPY_FLOAT +ufunc_entr_types[2] = NPY_DOUBLE +ufunc_entr_types[3] = NPY_DOUBLE +ufunc_entr_ptr[2*0] = _func_entr +ufunc_entr_ptr[2*0+1] = ("entr") +ufunc_entr_ptr[2*1] = _func_entr +ufunc_entr_ptr[2*1+1] = ("entr") +ufunc_entr_data[0] = &ufunc_entr_ptr[2*0] +ufunc_entr_data[1] = &ufunc_entr_ptr[2*1] +entr = np.PyUFunc_FromFuncAndData(ufunc_entr_loops, ufunc_entr_data, ufunc_entr_types, 2, 1, 1, 0, "entr", ufunc_entr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erf_loops[4] +cdef void *ufunc_erf_ptr[8] +cdef void *ufunc_erf_data[4] +cdef char ufunc_erf_types[8] +cdef char *ufunc_erf_doc = ( + "erf(z, out=None)\n" + "\n" + "Returns the error function of a complex argument.\n" + "\n" + "It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.\n" + "\n" + "Parameters\n" + "----------\n" + "z : ndarray\n" + " Input array.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "res : scalar or ndarray\n" + " The values of the error function at the given points `z`.\n" + "\n" + "See Also\n" + "--------\n" + "erfc, erfinv, erfcinv, wofz, erfcx, erfi\n" + "\n" + "Notes\n" + "-----\n" + "The cumulative distribution function of the standard normal distribution\n" + "is given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.\n" + "\n" + "References\n" + "----------\n" + ".. [1] https://en.wikipedia.org/wiki/Error_function\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover,\n" + " 1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm\n" + ".. [3] Steven G. 
Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erf(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erf(x)$')\n" + ">>> plt.show()") +ufunc_erf_loops[0] = loop_d_d__As_f_f +ufunc_erf_loops[1] = loop_d_d__As_d_d +ufunc_erf_loops[2] = loop_D_D__As_F_F +ufunc_erf_loops[3] = loop_D_D__As_D_D +ufunc_erf_types[0] = NPY_FLOAT +ufunc_erf_types[1] = NPY_FLOAT +ufunc_erf_types[2] = NPY_DOUBLE +ufunc_erf_types[3] = NPY_DOUBLE +ufunc_erf_types[4] = NPY_CFLOAT +ufunc_erf_types[5] = NPY_CFLOAT +ufunc_erf_types[6] = NPY_CDOUBLE +ufunc_erf_types[7] = NPY_CDOUBLE +ufunc_erf_ptr[2*0] = _func_cephes_erf +ufunc_erf_ptr[2*0+1] = ("erf") +ufunc_erf_ptr[2*1] = _func_cephes_erf +ufunc_erf_ptr[2*1+1] = ("erf") +ufunc_erf_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erf +ufunc_erf_ptr[2*2+1] = ("erf") +ufunc_erf_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erf +ufunc_erf_ptr[2*3+1] = ("erf") +ufunc_erf_data[0] = &ufunc_erf_ptr[2*0] +ufunc_erf_data[1] = &ufunc_erf_ptr[2*1] +ufunc_erf_data[2] = &ufunc_erf_ptr[2*2] +ufunc_erf_data[3] = &ufunc_erf_ptr[2*3] +erf = np.PyUFunc_FromFuncAndData(ufunc_erf_loops, ufunc_erf_data, ufunc_erf_types, 4, 1, 1, 0, "erf", ufunc_erf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfc_loops[4] +cdef void *ufunc_erfc_ptr[8] +cdef void *ufunc_erfc_data[4] +cdef char ufunc_erfc_types[8] +cdef char *ufunc_erfc_doc = ( + "erfc(x, out=None)\n" + "\n" + "Complementary error function, ``1 - erf(x)``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the complementary error function\n" + "\n" + "See Also\n" + "--------\n" + "erf, erfi, erfcx, dawsn, wofz\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. 
Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erfc(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erfc(x)$')\n" + ">>> plt.show()") +ufunc_erfc_loops[0] = loop_d_d__As_f_f +ufunc_erfc_loops[1] = loop_d_d__As_d_d +ufunc_erfc_loops[2] = loop_D_D__As_F_F +ufunc_erfc_loops[3] = loop_D_D__As_D_D +ufunc_erfc_types[0] = NPY_FLOAT +ufunc_erfc_types[1] = NPY_FLOAT +ufunc_erfc_types[2] = NPY_DOUBLE +ufunc_erfc_types[3] = NPY_DOUBLE +ufunc_erfc_types[4] = NPY_CFLOAT +ufunc_erfc_types[5] = NPY_CFLOAT +ufunc_erfc_types[6] = NPY_CDOUBLE +ufunc_erfc_types[7] = NPY_CDOUBLE +ufunc_erfc_ptr[2*0] = _func_cephes_erfc +ufunc_erfc_ptr[2*0+1] = ("erfc") +ufunc_erfc_ptr[2*1] = _func_cephes_erfc +ufunc_erfc_ptr[2*1+1] = ("erfc") +ufunc_erfc_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex +ufunc_erfc_ptr[2*2+1] = ("erfc") +ufunc_erfc_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex +ufunc_erfc_ptr[2*3+1] = ("erfc") +ufunc_erfc_data[0] = &ufunc_erfc_ptr[2*0] +ufunc_erfc_data[1] = &ufunc_erfc_ptr[2*1] +ufunc_erfc_data[2] = &ufunc_erfc_ptr[2*2] +ufunc_erfc_data[3] = &ufunc_erfc_ptr[2*3] +erfc = np.PyUFunc_FromFuncAndData(ufunc_erfc_loops, ufunc_erfc_data, ufunc_erfc_types, 4, 1, 1, 0, "erfc", ufunc_erfc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfcinv_loops[2] +cdef void *ufunc_erfcinv_ptr[4] +cdef void *ufunc_erfcinv_data[2] +cdef char ufunc_erfcinv_types[4] +cdef char *ufunc_erfcinv_doc = ( + "erfcinv(y, out=None)\n" + "\n" + "Inverse of the complementary error function.\n" + "\n" + "Computes the inverse of the complementary error function.\n" + "\n" + "In the complex domain, there is no unique complex number w satisfying\n" + "erfc(w)=z. This indicates a true inverse function would be multivalued.\n" + "When the domain restricts to the real, 0 < x < 2, there is a unique real\n" + "number satisfying erfc(erfcinv(x)) = x.\n" + "\n" + "It is related to the inverse of the error function by erfcinv(1-x) = erfinv(x).\n" + "\n" + "Parameters\n" + "----------\n" + "y : ndarray\n" + " Argument at which to evaluate. Domain: [0, 2]\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "erfcinv : scalar or ndarray\n" + " The inverse of erfc of y, element-wise\n" + "\n" + "See Also\n" + "--------\n" + "erf : Error function of a complex argument\n" + "erfc : Complementary error function, ``1 - erf(x)``\n" + "erfinv : Inverse of the error function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import erfcinv\n" + "\n" + ">>> erfcinv(0.5)\n" + "0.4769362762044699\n" + "\n" + ">>> y = np.linspace(0.0, 2.0, num=11)\n" + ">>> erfcinv(y)\n" + "array([ inf, 0.9061938 , 0.59511608, 0.37080716, 0.17914345,\n" + " -0. 
, -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,\n" + " -inf])\n" + "\n" + "Plot the function:\n" + "\n" + ">>> y = np.linspace(0, 2, 200)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(y, erfcinv(y))\n" + ">>> ax.grid(True)\n" + ">>> ax.set_xlabel('y')\n" + ">>> ax.set_title('erfcinv(y)')\n" + ">>> plt.show()") +ufunc_erfcinv_loops[0] = loop_d_d__As_f_f +ufunc_erfcinv_loops[1] = loop_d_d__As_d_d +ufunc_erfcinv_types[0] = NPY_FLOAT +ufunc_erfcinv_types[1] = NPY_FLOAT +ufunc_erfcinv_types[2] = NPY_DOUBLE +ufunc_erfcinv_types[3] = NPY_DOUBLE +ufunc_erfcinv_ptr[2*0] = _func_cephes_erfcinv +ufunc_erfcinv_ptr[2*0+1] = ("erfcinv") +ufunc_erfcinv_ptr[2*1] = _func_cephes_erfcinv +ufunc_erfcinv_ptr[2*1+1] = ("erfcinv") +ufunc_erfcinv_data[0] = &ufunc_erfcinv_ptr[2*0] +ufunc_erfcinv_data[1] = &ufunc_erfcinv_ptr[2*1] +erfcinv = np.PyUFunc_FromFuncAndData(ufunc_erfcinv_loops, ufunc_erfcinv_data, ufunc_erfcinv_types, 2, 1, 1, 0, "erfcinv", ufunc_erfcinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfcx_loops[4] +cdef void *ufunc_erfcx_ptr[8] +cdef void *ufunc_erfcx_data[4] +cdef char ufunc_erfcx_types[8] +cdef char *ufunc_erfcx_doc = ( + "erfcx(x, out=None)\n" + "\n" + "Scaled complementary error function, ``exp(x**2) * erfc(x)``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the scaled complementary error function\n" + "\n" + "\n" + "See Also\n" + "--------\n" + "erf, erfc, erfi, dawsn, wofz\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.12.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erfcx(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erfcx(x)$')\n" + ">>> plt.show()") +ufunc_erfcx_loops[0] = loop_d_d__As_f_f +ufunc_erfcx_loops[1] = loop_d_d__As_d_d +ufunc_erfcx_loops[2] = loop_D_D__As_F_F +ufunc_erfcx_loops[3] = loop_D_D__As_D_D +ufunc_erfcx_types[0] = NPY_FLOAT +ufunc_erfcx_types[1] = NPY_FLOAT +ufunc_erfcx_types[2] = NPY_DOUBLE +ufunc_erfcx_types[3] = NPY_DOUBLE +ufunc_erfcx_types[4] = NPY_CFLOAT +ufunc_erfcx_types[5] = NPY_CFLOAT +ufunc_erfcx_types[6] = NPY_CDOUBLE +ufunc_erfcx_types[7] = NPY_CDOUBLE +ufunc_erfcx_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx +ufunc_erfcx_ptr[2*0+1] = ("erfcx") +ufunc_erfcx_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx +ufunc_erfcx_ptr[2*1+1] = ("erfcx") +ufunc_erfcx_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex +ufunc_erfcx_ptr[2*2+1] = ("erfcx") +ufunc_erfcx_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex +ufunc_erfcx_ptr[2*3+1] = ("erfcx") +ufunc_erfcx_data[0] = &ufunc_erfcx_ptr[2*0] +ufunc_erfcx_data[1] = &ufunc_erfcx_ptr[2*1] +ufunc_erfcx_data[2] = &ufunc_erfcx_ptr[2*2] +ufunc_erfcx_data[3] = &ufunc_erfcx_ptr[2*3] +erfcx = np.PyUFunc_FromFuncAndData(ufunc_erfcx_loops, ufunc_erfcx_data, ufunc_erfcx_types, 4, 1, 1, 0, "erfcx", ufunc_erfcx_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfi_loops[4] +cdef void *ufunc_erfi_ptr[8] +cdef void *ufunc_erfi_data[4] +cdef char ufunc_erfi_types[8] +cdef char *ufunc_erfi_doc = ( + "erfi(z, 
out=None)\n" + "\n" + "Imaginary error function, ``-i erf(i z)``.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the imaginary error function\n" + "\n" + "See Also\n" + "--------\n" + "erf, erfc, erfcx, dawsn, wofz\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.12.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erfi(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erfi(x)$')\n" + ">>> plt.show()") +ufunc_erfi_loops[0] = loop_d_d__As_f_f +ufunc_erfi_loops[1] = loop_d_d__As_d_d +ufunc_erfi_loops[2] = loop_D_D__As_F_F +ufunc_erfi_loops[3] = loop_D_D__As_D_D +ufunc_erfi_types[0] = NPY_FLOAT +ufunc_erfi_types[1] = NPY_FLOAT +ufunc_erfi_types[2] = NPY_DOUBLE +ufunc_erfi_types[3] = NPY_DOUBLE +ufunc_erfi_types[4] = NPY_CFLOAT +ufunc_erfi_types[5] = NPY_CFLOAT +ufunc_erfi_types[6] = NPY_CDOUBLE +ufunc_erfi_types[7] = NPY_CDOUBLE +ufunc_erfi_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_erfi +ufunc_erfi_ptr[2*0+1] = ("erfi") +ufunc_erfi_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_erfi +ufunc_erfi_ptr[2*1+1] = ("erfi") +ufunc_erfi_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex +ufunc_erfi_ptr[2*2+1] = ("erfi") +ufunc_erfi_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex +ufunc_erfi_ptr[2*3+1] = ("erfi") +ufunc_erfi_data[0] = &ufunc_erfi_ptr[2*0] +ufunc_erfi_data[1] = &ufunc_erfi_ptr[2*1] +ufunc_erfi_data[2] = &ufunc_erfi_ptr[2*2] +ufunc_erfi_data[3] = &ufunc_erfi_ptr[2*3] +erfi = np.PyUFunc_FromFuncAndData(ufunc_erfi_loops, ufunc_erfi_data, ufunc_erfi_types, 4, 1, 1, 0, "erfi", ufunc_erfi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfinv_loops[2] +cdef void *ufunc_erfinv_ptr[4] +cdef void *ufunc_erfinv_data[2] +cdef char ufunc_erfinv_types[4] +cdef char *ufunc_erfinv_doc = ( + "erfinv(y, out=None)\n" + "\n" + "Inverse of the error function.\n" + "\n" + "Computes the inverse of the error function.\n" + "\n" + "In the complex domain, there is no unique complex number w satisfying\n" + "erf(w)=z. This indicates a true inverse function would be multivalued.\n" + "When the domain restricts to the real, -1 < x < 1, there is a unique real\n" + "number satisfying erf(erfinv(x)) = x.\n" + "\n" + "Parameters\n" + "----------\n" + "y : ndarray\n" + " Argument at which to evaluate. 
Domain: [-1, 1]\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "erfinv : scalar or ndarray\n" + " The inverse of erf of y, element-wise\n" + "\n" + "See Also\n" + "--------\n" + "erf : Error function of a complex argument\n" + "erfc : Complementary error function, ``1 - erf(x)``\n" + "erfcinv : Inverse of the complementary error function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import erfinv, erf\n" + "\n" + ">>> erfinv(0.5)\n" + "0.4769362762044699\n" + "\n" + ">>> y = np.linspace(-1.0, 1.0, num=9)\n" + ">>> x = erfinv(y)\n" + ">>> x\n" + "array([ -inf, -0.81341985, -0.47693628, -0.22531206, 0. ,\n" + " 0.22531206, 0.47693628, 0.81341985, inf])\n" + "\n" + "Verify that ``erf(erfinv(y))`` is ``y``.\n" + "\n" + ">>> erf(x)\n" + "array([-1. , -0.75, -0.5 , -0.25, 0. , 0.25, 0.5 , 0.75, 1. ])\n" + "\n" + "Plot the function:\n" + "\n" + ">>> y = np.linspace(-1, 1, 200)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(y, erfinv(y))\n" + ">>> ax.grid(True)\n" + ">>> ax.set_xlabel('y')\n" + ">>> ax.set_title('erfinv(y)')\n" + ">>> plt.show()") +ufunc_erfinv_loops[0] = loop_f_f__As_f_f +ufunc_erfinv_loops[1] = loop_d_d__As_d_d +ufunc_erfinv_types[0] = NPY_FLOAT +ufunc_erfinv_types[1] = NPY_FLOAT +ufunc_erfinv_types[2] = NPY_DOUBLE +ufunc_erfinv_types[3] = NPY_DOUBLE +ufunc_erfinv_ptr[2*0] = scipy.special._ufuncs_cxx._export_erfinv_float +ufunc_erfinv_ptr[2*0+1] = ("erfinv") +ufunc_erfinv_ptr[2*1] = scipy.special._ufuncs_cxx._export_erfinv_double +ufunc_erfinv_ptr[2*1+1] = ("erfinv") +ufunc_erfinv_data[0] = &ufunc_erfinv_ptr[2*0] +ufunc_erfinv_data[1] = &ufunc_erfinv_ptr[2*1] +erfinv = np.PyUFunc_FromFuncAndData(ufunc_erfinv_loops, ufunc_erfinv_data, ufunc_erfinv_types, 2, 1, 1, 0, "erfinv", ufunc_erfinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebyc_loops[5] +cdef void *ufunc_eval_chebyc_ptr[10] +cdef void *ufunc_eval_chebyc_data[5] +cdef char ufunc_eval_chebyc_types[15] +cdef char *ufunc_eval_chebyc_doc = ( + "eval_chebyc(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " C_n(x) = 2 T_n(x/2)\n" + "\n" + "where :math:`T_n` is a Chebyshev polynomial of the first kind. See\n" + "22.5.11 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to `eval_chebyt`.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "C : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebyc : roots and quadrature weights of Chebyshev\n" + " polynomials of the first kind on [-2, 2]\n" + "chebyc : Chebyshev polynomial object\n" + "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n" + "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "They are a scaled version of the Chebyshev polynomials of the\n" + "first kind.\n" + "\n" + ">>> x = np.linspace(-2, 2, 6)\n" + ">>> sc.eval_chebyc(3, x)\n" + "array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])\n" + ">>> 2 * sc.eval_chebyt(3, x / 2)\n" + "array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])") +ufunc_eval_chebyc_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_chebyc_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebyc_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebyc_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebyc_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebyc_types[0] = NPY_INTP +ufunc_eval_chebyc_types[1] = NPY_DOUBLE +ufunc_eval_chebyc_types[2] = NPY_DOUBLE +ufunc_eval_chebyc_types[3] = NPY_FLOAT +ufunc_eval_chebyc_types[4] = NPY_FLOAT +ufunc_eval_chebyc_types[5] = NPY_FLOAT +ufunc_eval_chebyc_types[6] = NPY_FLOAT +ufunc_eval_chebyc_types[7] = NPY_CFLOAT +ufunc_eval_chebyc_types[8] = NPY_CFLOAT +ufunc_eval_chebyc_types[9] = NPY_DOUBLE +ufunc_eval_chebyc_types[10] = NPY_DOUBLE +ufunc_eval_chebyc_types[11] = NPY_DOUBLE +ufunc_eval_chebyc_types[12] = NPY_DOUBLE +ufunc_eval_chebyc_types[13] = NPY_CDOUBLE +ufunc_eval_chebyc_types[14] = NPY_CDOUBLE +ufunc_eval_chebyc_ptr[2*0] = _func_eval_chebyc_l +ufunc_eval_chebyc_ptr[2*0+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*1] = _func_eval_chebyc[double] +ufunc_eval_chebyc_ptr[2*1+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*2] = _func_eval_chebyc[double_complex] +ufunc_eval_chebyc_ptr[2*2+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*3] = _func_eval_chebyc[double] +ufunc_eval_chebyc_ptr[2*3+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*4] = _func_eval_chebyc[double_complex] +ufunc_eval_chebyc_ptr[2*4+1] = ("eval_chebyc") +ufunc_eval_chebyc_data[0] = &ufunc_eval_chebyc_ptr[2*0] +ufunc_eval_chebyc_data[1] = &ufunc_eval_chebyc_ptr[2*1] +ufunc_eval_chebyc_data[2] = &ufunc_eval_chebyc_ptr[2*2] +ufunc_eval_chebyc_data[3] = &ufunc_eval_chebyc_ptr[2*3] +ufunc_eval_chebyc_data[4] = &ufunc_eval_chebyc_ptr[2*4] +eval_chebyc = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyc_loops, ufunc_eval_chebyc_data, ufunc_eval_chebyc_types, 5, 2, 1, 0, "eval_chebyc", ufunc_eval_chebyc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebys_loops[5] +cdef void *ufunc_eval_chebys_ptr[10] +cdef void *ufunc_eval_chebys_data[5] +cdef char ufunc_eval_chebys_types[15] +cdef char *ufunc_eval_chebys_doc = ( + "eval_chebys(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " S_n(x) = U_n(x/2)\n" + "\n" + "where :math:`U_n` is a Chebyshev polynomial of the second\n" + "kind. See 22.5.13 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer, the result is\n" + " determined via the relation to `eval_chebyu`.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "S : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebys : roots and quadrature weights of Chebyshev\n" + " polynomials of the second kind on [-2, 2]\n" + "chebys : Chebyshev polynomial object\n" + "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "They are a scaled version of the Chebyshev polynomials of the\n" + "second kind.\n" + "\n" + ">>> x = np.linspace(-2, 2, 6)\n" + ">>> sc.eval_chebys(3, x)\n" + "array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])\n" + ">>> sc.eval_chebyu(3, x / 2)\n" + "array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])") +ufunc_eval_chebys_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_chebys_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebys_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebys_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebys_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebys_types[0] = NPY_INTP +ufunc_eval_chebys_types[1] = NPY_DOUBLE +ufunc_eval_chebys_types[2] = NPY_DOUBLE +ufunc_eval_chebys_types[3] = NPY_FLOAT +ufunc_eval_chebys_types[4] = NPY_FLOAT +ufunc_eval_chebys_types[5] = NPY_FLOAT +ufunc_eval_chebys_types[6] = NPY_FLOAT +ufunc_eval_chebys_types[7] = NPY_CFLOAT +ufunc_eval_chebys_types[8] = NPY_CFLOAT +ufunc_eval_chebys_types[9] = NPY_DOUBLE +ufunc_eval_chebys_types[10] = NPY_DOUBLE +ufunc_eval_chebys_types[11] = NPY_DOUBLE +ufunc_eval_chebys_types[12] = NPY_DOUBLE +ufunc_eval_chebys_types[13] = NPY_CDOUBLE +ufunc_eval_chebys_types[14] = NPY_CDOUBLE +ufunc_eval_chebys_ptr[2*0] = _func_eval_chebys_l +ufunc_eval_chebys_ptr[2*0+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*1] = _func_eval_chebys[double] +ufunc_eval_chebys_ptr[2*1+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*2] = _func_eval_chebys[double_complex] +ufunc_eval_chebys_ptr[2*2+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*3] = _func_eval_chebys[double] +ufunc_eval_chebys_ptr[2*3+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*4] = _func_eval_chebys[double_complex] +ufunc_eval_chebys_ptr[2*4+1] = ("eval_chebys") +ufunc_eval_chebys_data[0] = &ufunc_eval_chebys_ptr[2*0] +ufunc_eval_chebys_data[1] = &ufunc_eval_chebys_ptr[2*1] +ufunc_eval_chebys_data[2] = &ufunc_eval_chebys_ptr[2*2] +ufunc_eval_chebys_data[3] = &ufunc_eval_chebys_ptr[2*3] +ufunc_eval_chebys_data[4] = &ufunc_eval_chebys_ptr[2*4] +eval_chebys = np.PyUFunc_FromFuncAndData(ufunc_eval_chebys_loops, ufunc_eval_chebys_data, ufunc_eval_chebys_types, 5, 2, 1, 0, "eval_chebys", ufunc_eval_chebys_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebyt_loops[5] +cdef void *ufunc_eval_chebyt_ptr[10] +cdef void *ufunc_eval_chebyt_data[5] +cdef char ufunc_eval_chebyt_types[15] +cdef char *ufunc_eval_chebyt_doc = ( + "eval_chebyt(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the first kind at a point.\n" + "\n" + "The Chebyshev polynomials of the first kind can be defined via the\n" + "Gauss hypergeometric function :math:`{}_2F_1` as\n" 
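+# (editorial aside, comment only, kept outside the docstring text: the
+# hypergeometric identity stated below is easy to spot-check once the module
+# is built, assuming ``import scipy.special as sc``; for example
+# sc.eval_chebyt(3, 0.5) and sc.hyp2f1(3, -3, 0.5, (1 - 0.5)/2) both
+# evaluate to T_3(0.5) = -1.0)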
+ "\n" + ".. math::\n" + "\n" + " T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.47 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "T : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebyt : roots and quadrature weights of Chebyshev\n" + " polynomials of the first kind\n" + "chebyu : Chebychev polynomial object\n" + "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n" + "hyp2f1 : Gauss hypergeometric function\n" + "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n" + "\n" + "Notes\n" + "-----\n" + "This routine is numerically stable for `x` in ``[-1, 1]`` at least\n" + "up to order ``10000``.\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_chebyt_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_chebyt_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebyt_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebyt_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebyt_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebyt_types[0] = NPY_INTP +ufunc_eval_chebyt_types[1] = NPY_DOUBLE +ufunc_eval_chebyt_types[2] = NPY_DOUBLE +ufunc_eval_chebyt_types[3] = NPY_FLOAT +ufunc_eval_chebyt_types[4] = NPY_FLOAT +ufunc_eval_chebyt_types[5] = NPY_FLOAT +ufunc_eval_chebyt_types[6] = NPY_FLOAT +ufunc_eval_chebyt_types[7] = NPY_CFLOAT +ufunc_eval_chebyt_types[8] = NPY_CFLOAT +ufunc_eval_chebyt_types[9] = NPY_DOUBLE +ufunc_eval_chebyt_types[10] = NPY_DOUBLE +ufunc_eval_chebyt_types[11] = NPY_DOUBLE +ufunc_eval_chebyt_types[12] = NPY_DOUBLE +ufunc_eval_chebyt_types[13] = NPY_CDOUBLE +ufunc_eval_chebyt_types[14] = NPY_CDOUBLE +ufunc_eval_chebyt_ptr[2*0] = _func_eval_chebyt_l +ufunc_eval_chebyt_ptr[2*0+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*1] = _func_eval_chebyt[double] +ufunc_eval_chebyt_ptr[2*1+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*2] = _func_eval_chebyt[double_complex] +ufunc_eval_chebyt_ptr[2*2+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*3] = _func_eval_chebyt[double] +ufunc_eval_chebyt_ptr[2*3+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*4] = _func_eval_chebyt[double_complex] +ufunc_eval_chebyt_ptr[2*4+1] = ("eval_chebyt") +ufunc_eval_chebyt_data[0] = &ufunc_eval_chebyt_ptr[2*0] +ufunc_eval_chebyt_data[1] = &ufunc_eval_chebyt_ptr[2*1] +ufunc_eval_chebyt_data[2] = &ufunc_eval_chebyt_ptr[2*2] +ufunc_eval_chebyt_data[3] = &ufunc_eval_chebyt_ptr[2*3] +ufunc_eval_chebyt_data[4] = &ufunc_eval_chebyt_ptr[2*4] +eval_chebyt = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyt_loops, ufunc_eval_chebyt_data, ufunc_eval_chebyt_types, 5, 2, 1, 0, "eval_chebyt", ufunc_eval_chebyt_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebyu_loops[5] +cdef void *ufunc_eval_chebyu_ptr[10] +cdef void *ufunc_eval_chebyu_data[5] +cdef char ufunc_eval_chebyu_types[15] +cdef char *ufunc_eval_chebyu_doc = ( + "eval_chebyu(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the second kind at a 
point.\n" + "\n" + "The Chebyshev polynomials of the second kind can be defined via\n" + "the Gauss hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.48 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "U : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebyu : roots and quadrature weights of Chebyshev\n" + " polynomials of the second kind\n" + "chebyu : Chebyshev polynomial object\n" + "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n" + "hyp2f1 : Gauss hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_chebyu_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_chebyu_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebyu_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebyu_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebyu_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebyu_types[0] = NPY_INTP +ufunc_eval_chebyu_types[1] = NPY_DOUBLE +ufunc_eval_chebyu_types[2] = NPY_DOUBLE +ufunc_eval_chebyu_types[3] = NPY_FLOAT +ufunc_eval_chebyu_types[4] = NPY_FLOAT +ufunc_eval_chebyu_types[5] = NPY_FLOAT +ufunc_eval_chebyu_types[6] = NPY_FLOAT +ufunc_eval_chebyu_types[7] = NPY_CFLOAT +ufunc_eval_chebyu_types[8] = NPY_CFLOAT +ufunc_eval_chebyu_types[9] = NPY_DOUBLE +ufunc_eval_chebyu_types[10] = NPY_DOUBLE +ufunc_eval_chebyu_types[11] = NPY_DOUBLE +ufunc_eval_chebyu_types[12] = NPY_DOUBLE +ufunc_eval_chebyu_types[13] = NPY_CDOUBLE +ufunc_eval_chebyu_types[14] = NPY_CDOUBLE +ufunc_eval_chebyu_ptr[2*0] = _func_eval_chebyu_l +ufunc_eval_chebyu_ptr[2*0+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*1] = _func_eval_chebyu[double] +ufunc_eval_chebyu_ptr[2*1+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*2] = _func_eval_chebyu[double_complex] +ufunc_eval_chebyu_ptr[2*2+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*3] = _func_eval_chebyu[double] +ufunc_eval_chebyu_ptr[2*3+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*4] = _func_eval_chebyu[double_complex] +ufunc_eval_chebyu_ptr[2*4+1] = ("eval_chebyu") +ufunc_eval_chebyu_data[0] = &ufunc_eval_chebyu_ptr[2*0] +ufunc_eval_chebyu_data[1] = &ufunc_eval_chebyu_ptr[2*1] +ufunc_eval_chebyu_data[2] = &ufunc_eval_chebyu_ptr[2*2] +ufunc_eval_chebyu_data[3] = &ufunc_eval_chebyu_ptr[2*3] +ufunc_eval_chebyu_data[4] = &ufunc_eval_chebyu_ptr[2*4] +eval_chebyu = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyu_loops, ufunc_eval_chebyu_data, ufunc_eval_chebyu_types, 5, 2, 1, 0, "eval_chebyu", ufunc_eval_chebyu_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_gegenbauer_loops[5] +cdef void *ufunc_eval_gegenbauer_ptr[10] +cdef void *ufunc_eval_gegenbauer_data[5] +cdef char ufunc_eval_gegenbauer_types[20] +cdef char *ufunc_eval_gegenbauer_doc = ( + "eval_gegenbauer(n, alpha, x, out=None)\n" + "\n" + "Evaluate Gegenbauer polynomial at a point.\n" + "\n" + "The 
Gegenbauer polynomials can be defined via the Gauss\n" + "hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " C_n^{(\\alpha)}(x) = \\frac{(2\\alpha)_n}{\\Gamma(n + 1)}\n" + " {}_2F_1(-n, 2\\alpha + n; \\alpha + 1/2; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.46 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "alpha : array_like\n" + " Parameter\n" + "x : array_like\n" + " Points at which to evaluate the Gegenbauer polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "C : scalar or ndarray\n" + " Values of the Gegenbauer polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_gegenbauer : roots and quadrature weights of Gegenbauer\n" + " polynomials\n" + "gegenbauer : Gegenbauer polynomial object\n" + "hyp2f1 : Gauss hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_gegenbauer_loops[0] = loop_d_pdd__As_pdd_d +ufunc_eval_gegenbauer_loops[1] = loop_d_ddd__As_fff_f +ufunc_eval_gegenbauer_loops[2] = loop_D_ddD__As_ffF_F +ufunc_eval_gegenbauer_loops[3] = loop_d_ddd__As_ddd_d +ufunc_eval_gegenbauer_loops[4] = loop_D_ddD__As_ddD_D +ufunc_eval_gegenbauer_types[0] = NPY_INTP +ufunc_eval_gegenbauer_types[1] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[2] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[3] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[4] = NPY_FLOAT +ufunc_eval_gegenbauer_types[5] = NPY_FLOAT +ufunc_eval_gegenbauer_types[6] = NPY_FLOAT +ufunc_eval_gegenbauer_types[7] = NPY_FLOAT +ufunc_eval_gegenbauer_types[8] = NPY_FLOAT +ufunc_eval_gegenbauer_types[9] = NPY_FLOAT +ufunc_eval_gegenbauer_types[10] = NPY_CFLOAT +ufunc_eval_gegenbauer_types[11] = NPY_CFLOAT +ufunc_eval_gegenbauer_types[12] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[13] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[14] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[15] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[16] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[17] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[18] = NPY_CDOUBLE +ufunc_eval_gegenbauer_types[19] = NPY_CDOUBLE +ufunc_eval_gegenbauer_ptr[2*0] = _func_eval_gegenbauer_l +ufunc_eval_gegenbauer_ptr[2*0+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*1] = _func_eval_gegenbauer[double] +ufunc_eval_gegenbauer_ptr[2*1+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*2] = _func_eval_gegenbauer[double_complex] +ufunc_eval_gegenbauer_ptr[2*2+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*3] = _func_eval_gegenbauer[double] +ufunc_eval_gegenbauer_ptr[2*3+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*4] = _func_eval_gegenbauer[double_complex] +ufunc_eval_gegenbauer_ptr[2*4+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_data[0] = &ufunc_eval_gegenbauer_ptr[2*0] +ufunc_eval_gegenbauer_data[1] = &ufunc_eval_gegenbauer_ptr[2*1] +ufunc_eval_gegenbauer_data[2] = &ufunc_eval_gegenbauer_ptr[2*2] +ufunc_eval_gegenbauer_data[3] = &ufunc_eval_gegenbauer_ptr[2*3] +ufunc_eval_gegenbauer_data[4] = &ufunc_eval_gegenbauer_ptr[2*4] +eval_gegenbauer = np.PyUFunc_FromFuncAndData(ufunc_eval_gegenbauer_loops,
ufunc_eval_gegenbauer_data, ufunc_eval_gegenbauer_types, 5, 3, 1, 0, "eval_gegenbauer", ufunc_eval_gegenbauer_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_genlaguerre_loops[5] +cdef void *ufunc_eval_genlaguerre_ptr[10] +cdef void *ufunc_eval_genlaguerre_data[5] +cdef char ufunc_eval_genlaguerre_types[20] +cdef char *ufunc_eval_genlaguerre_doc = ( + "eval_genlaguerre(n, alpha, x, out=None)\n" + "\n" + "Evaluate generalized Laguerre polynomial at a point.\n" + "\n" + "The generalized Laguerre polynomials can be defined via the\n" + "confluent hypergeometric function :math:`{}_1F_1` as\n" + "\n" + ".. math::\n" + "\n" + " L_n^{(\\alpha)}(x) = \\binom{n + \\alpha}{n}\n" + " {}_1F_1(-n, \\alpha + 1, x).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre\n" + "polynomials are the special case where :math:`\\alpha = 0`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the confluent hypergeometric\n" + " function.\n" + "alpha : array_like\n" + " Parameter; must have ``alpha > -1``\n" + "x : array_like\n" + " Points at which to evaluate the generalized Laguerre\n" + " polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "L : scalar or ndarray\n" + " Values of the generalized Laguerre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_genlaguerre : roots and quadrature weights of generalized\n" + " Laguerre polynomials\n" + "genlaguerre : generalized Laguerre polynomial object\n" + "hyp1f1 : confluent hypergeometric function\n" + "eval_laguerre : evaluate Laguerre polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
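+# Cross-check sketch for eval_gegenbauer: at alpha = 1/2 the Gegenbauer
+# polynomials reduce to the Legendre polynomials, C_n^(1/2)(x) = P_n(x):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_gegenbauer, eval_legendre
+#     >>> x = np.linspace(-1, 1, 5)
+#     >>> np.allclose(eval_gegenbauer(4, 0.5, x), eval_legendre(4, x))
+#     True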
New York: Dover, 1972.") +ufunc_eval_genlaguerre_loops[0] = loop_d_pdd__As_pdd_d +ufunc_eval_genlaguerre_loops[1] = loop_d_ddd__As_fff_f +ufunc_eval_genlaguerre_loops[2] = loop_D_ddD__As_ffF_F +ufunc_eval_genlaguerre_loops[3] = loop_d_ddd__As_ddd_d +ufunc_eval_genlaguerre_loops[4] = loop_D_ddD__As_ddD_D +ufunc_eval_genlaguerre_types[0] = NPY_INTP +ufunc_eval_genlaguerre_types[1] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[2] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[3] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[4] = NPY_FLOAT +ufunc_eval_genlaguerre_types[5] = NPY_FLOAT +ufunc_eval_genlaguerre_types[6] = NPY_FLOAT +ufunc_eval_genlaguerre_types[7] = NPY_FLOAT +ufunc_eval_genlaguerre_types[8] = NPY_FLOAT +ufunc_eval_genlaguerre_types[9] = NPY_FLOAT +ufunc_eval_genlaguerre_types[10] = NPY_CFLOAT +ufunc_eval_genlaguerre_types[11] = NPY_CFLOAT +ufunc_eval_genlaguerre_types[12] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[13] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[14] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[15] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[16] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[17] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[18] = NPY_CDOUBLE +ufunc_eval_genlaguerre_types[19] = NPY_CDOUBLE +ufunc_eval_genlaguerre_ptr[2*0] = _func_eval_genlaguerre_l +ufunc_eval_genlaguerre_ptr[2*0+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*1] = _func_eval_genlaguerre[double] +ufunc_eval_genlaguerre_ptr[2*1+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*2] = _func_eval_genlaguerre[double_complex] +ufunc_eval_genlaguerre_ptr[2*2+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*3] = _func_eval_genlaguerre[double] +ufunc_eval_genlaguerre_ptr[2*3+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*4] = _func_eval_genlaguerre[double_complex] +ufunc_eval_genlaguerre_ptr[2*4+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_data[0] = &ufunc_eval_genlaguerre_ptr[2*0] +ufunc_eval_genlaguerre_data[1] = &ufunc_eval_genlaguerre_ptr[2*1] +ufunc_eval_genlaguerre_data[2] = &ufunc_eval_genlaguerre_ptr[2*2] +ufunc_eval_genlaguerre_data[3] = &ufunc_eval_genlaguerre_ptr[2*3] +ufunc_eval_genlaguerre_data[4] = &ufunc_eval_genlaguerre_ptr[2*4] +eval_genlaguerre = np.PyUFunc_FromFuncAndData(ufunc_eval_genlaguerre_loops, ufunc_eval_genlaguerre_data, ufunc_eval_genlaguerre_types, 5, 3, 1, 0, "eval_genlaguerre", ufunc_eval_genlaguerre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_hermite_loops[1] +cdef void *ufunc_eval_hermite_ptr[2] +cdef void *ufunc_eval_hermite_data[1] +cdef char ufunc_eval_hermite_types[3] +cdef char *ufunc_eval_hermite_doc = ( + "eval_hermite(n, x, out=None)\n" + "\n" + "Evaluate physicist's Hermite polynomial at a point.\n" + "\n" + "Defined by\n" + "\n" + ".. math::\n" + "\n" + " H_n(x) = (-1)^n e^{x^2} \\frac{d^n}{dx^n} e^{-x^2};\n" + "\n" + ":math:`H_n` is a polynomial of degree :math:`n`. 
See 22.11.7 in\n" + "[AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial\n" + "x : array_like\n" + " Points at which to evaluate the Hermite polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "H : scalar or ndarray\n" + " Values of the Hermite polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_hermite : roots and quadrature weights of physicist's\n" + " Hermite polynomials\n" + "hermite : physicist's Hermite polynomial object\n" + "numpy.polynomial.hermite.Hermite : Physicist's Hermite series\n" + "eval_hermitenorm : evaluate Probabilist's Hermite polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_hermite_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_hermite_types[0] = NPY_INTP +ufunc_eval_hermite_types[1] = NPY_DOUBLE +ufunc_eval_hermite_types[2] = NPY_DOUBLE +ufunc_eval_hermite_ptr[2*0] = _func_eval_hermite +ufunc_eval_hermite_ptr[2*0+1] = ("eval_hermite") +ufunc_eval_hermite_data[0] = &ufunc_eval_hermite_ptr[2*0] +eval_hermite = np.PyUFunc_FromFuncAndData(ufunc_eval_hermite_loops, ufunc_eval_hermite_data, ufunc_eval_hermite_types, 1, 2, 1, 0, "eval_hermite", ufunc_eval_hermite_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_hermitenorm_loops[1] +cdef void *ufunc_eval_hermitenorm_ptr[2] +cdef void *ufunc_eval_hermitenorm_data[1] +cdef char ufunc_eval_hermitenorm_types[3] +cdef char *ufunc_eval_hermitenorm_doc = ( + "eval_hermitenorm(n, x, out=None)\n" + "\n" + "Evaluate probabilist's (normalized) Hermite polynomial at a\n" + "point.\n" + "\n" + "Defined by\n" + "\n" + ".. math::\n" + "\n" + " He_n(x) = (-1)^n e^{x^2/2} \\frac{d^n}{dx^n} e^{-x^2/2};\n" + "\n" + ":math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in\n" + "[AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial\n" + "x : array_like\n" + " Points at which to evaluate the Hermite polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "He : scalar or ndarray\n" + " Values of the Hermite polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_hermitenorm : roots and quadrature weights of probabilist's\n" + " Hermite polynomials\n" + "hermitenorm : probabilist's Hermite polynomial object\n" + "numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series\n" + "eval_hermite : evaluate physicist's Hermite polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
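+# Cross-check sketch for eval_hermite: the physicist's and probabilist's
+# conventions are related by H_n(x) = 2**(n/2) * He_n(sqrt(2) * x):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_hermite, eval_hermitenorm
+#     >>> x = np.linspace(-2, 2, 5)
+#     >>> np.allclose(eval_hermite(3, x),
+#     ...             2**1.5 * eval_hermitenorm(3, np.sqrt(2) * x))
+#     True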
New York: Dover, 1972.") +ufunc_eval_hermitenorm_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_hermitenorm_types[0] = NPY_INTP +ufunc_eval_hermitenorm_types[1] = NPY_DOUBLE +ufunc_eval_hermitenorm_types[2] = NPY_DOUBLE +ufunc_eval_hermitenorm_ptr[2*0] = _func_eval_hermitenorm +ufunc_eval_hermitenorm_ptr[2*0+1] = ("eval_hermitenorm") +ufunc_eval_hermitenorm_data[0] = &ufunc_eval_hermitenorm_ptr[2*0] +eval_hermitenorm = np.PyUFunc_FromFuncAndData(ufunc_eval_hermitenorm_loops, ufunc_eval_hermitenorm_data, ufunc_eval_hermitenorm_types, 1, 2, 1, 0, "eval_hermitenorm", ufunc_eval_hermitenorm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_jacobi_loops[5] +cdef void *ufunc_eval_jacobi_ptr[10] +cdef void *ufunc_eval_jacobi_data[5] +cdef char ufunc_eval_jacobi_types[25] +cdef char *ufunc_eval_jacobi_doc = ( + "eval_jacobi(n, alpha, beta, x, out=None)\n" + "\n" + "Evaluate Jacobi polynomial at a point.\n" + "\n" + "The Jacobi polynomials can be defined via the Gauss hypergeometric\n" + "function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " P_n^{(\\alpha, \\beta)}(x) = \\frac{(\\alpha + 1)_n}{\\Gamma(n + 1)}\n" + " {}_2F_1(-n, 1 + \\alpha + \\beta + n; \\alpha + 1; (1 - z)/2)\n" + "\n" + "where :math:`(\\cdot)_n` is the Pochhammer symbol; see `poch`. When\n" + ":math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.42 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "alpha : array_like\n" + " Parameter\n" + "beta : array_like\n" + " Parameter\n" + "x : array_like\n" + " Points at which to evaluate the polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "P : scalar or ndarray\n" + " Values of the Jacobi polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_jacobi : roots and quadrature weights of Jacobi polynomials\n" + "jacobi : Jacobi polynomial object\n" + "hyp2f1 : Gauss hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.") +ufunc_eval_jacobi_loops[0] = loop_d_pddd__As_pddd_d +ufunc_eval_jacobi_loops[1] = loop_d_dddd__As_ffff_f +ufunc_eval_jacobi_loops[2] = loop_D_dddD__As_fffF_F +ufunc_eval_jacobi_loops[3] = loop_d_dddd__As_dddd_d +ufunc_eval_jacobi_loops[4] = loop_D_dddD__As_dddD_D +ufunc_eval_jacobi_types[0] = NPY_INTP +ufunc_eval_jacobi_types[1] = NPY_DOUBLE +ufunc_eval_jacobi_types[2] = NPY_DOUBLE +ufunc_eval_jacobi_types[3] = NPY_DOUBLE +ufunc_eval_jacobi_types[4] = NPY_DOUBLE +ufunc_eval_jacobi_types[5] = NPY_FLOAT +ufunc_eval_jacobi_types[6] = NPY_FLOAT +ufunc_eval_jacobi_types[7] = NPY_FLOAT +ufunc_eval_jacobi_types[8] = NPY_FLOAT +ufunc_eval_jacobi_types[9] = NPY_FLOAT +ufunc_eval_jacobi_types[10] = NPY_FLOAT +ufunc_eval_jacobi_types[11] = NPY_FLOAT +ufunc_eval_jacobi_types[12] = NPY_FLOAT +ufunc_eval_jacobi_types[13] = NPY_CFLOAT +ufunc_eval_jacobi_types[14] = NPY_CFLOAT +ufunc_eval_jacobi_types[15] = NPY_DOUBLE +ufunc_eval_jacobi_types[16] = NPY_DOUBLE +ufunc_eval_jacobi_types[17] = NPY_DOUBLE +ufunc_eval_jacobi_types[18] = NPY_DOUBLE +ufunc_eval_jacobi_types[19] = NPY_DOUBLE +ufunc_eval_jacobi_types[20] = NPY_DOUBLE +ufunc_eval_jacobi_types[21] = NPY_DOUBLE +ufunc_eval_jacobi_types[22] = NPY_DOUBLE +ufunc_eval_jacobi_types[23] = NPY_CDOUBLE +ufunc_eval_jacobi_types[24] = NPY_CDOUBLE +ufunc_eval_jacobi_ptr[2*0] = _func_eval_jacobi_l +ufunc_eval_jacobi_ptr[2*0+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*1] = _func_eval_jacobi[double] +ufunc_eval_jacobi_ptr[2*1+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*2] = _func_eval_jacobi[double_complex] +ufunc_eval_jacobi_ptr[2*2+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*3] = _func_eval_jacobi[double] +ufunc_eval_jacobi_ptr[2*3+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*4] = _func_eval_jacobi[double_complex] +ufunc_eval_jacobi_ptr[2*4+1] = ("eval_jacobi") +ufunc_eval_jacobi_data[0] = &ufunc_eval_jacobi_ptr[2*0] +ufunc_eval_jacobi_data[1] = &ufunc_eval_jacobi_ptr[2*1] +ufunc_eval_jacobi_data[2] = &ufunc_eval_jacobi_ptr[2*2] +ufunc_eval_jacobi_data[3] = &ufunc_eval_jacobi_ptr[2*3] +ufunc_eval_jacobi_data[4] = &ufunc_eval_jacobi_ptr[2*4] +eval_jacobi = np.PyUFunc_FromFuncAndData(ufunc_eval_jacobi_loops, ufunc_eval_jacobi_data, ufunc_eval_jacobi_types, 5, 4, 1, 0, "eval_jacobi", ufunc_eval_jacobi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_laguerre_loops[5] +cdef void *ufunc_eval_laguerre_ptr[10] +cdef void *ufunc_eval_laguerre_data[5] +cdef char ufunc_eval_laguerre_types[15] +cdef char *ufunc_eval_laguerre_doc = ( + "eval_laguerre(n, x, out=None)\n" + "\n" + "Evaluate Laguerre polynomial at a point.\n" + "\n" + "The Laguerre polynomials can be defined via the confluent\n" + "hypergeometric function :math:`{}_1F_1` as\n" + "\n" + ".. math::\n" + "\n" + " L_n(x) = {}_1F_1(-n, 1, x).\n" + "\n" + "See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an\n" + "integer the result is a polynomial of degree :math:`n`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer the result is\n" + " determined via the relation to the confluent hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Laguerre polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "L : scalar or ndarray\n" + " Values of the Laguerre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_laguerre : roots and quadrature weights of Laguerre\n" + " polynomials\n" + "laguerre : Laguerre polynomial object\n" + "numpy.polynomial.laguerre.Laguerre : Laguerre series\n" + "eval_genlaguerre : evaluate generalized Laguerre polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_laguerre_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_laguerre_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_laguerre_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_laguerre_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_laguerre_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_laguerre_types[0] = NPY_INTP +ufunc_eval_laguerre_types[1] = NPY_DOUBLE +ufunc_eval_laguerre_types[2] = NPY_DOUBLE +ufunc_eval_laguerre_types[3] = NPY_FLOAT +ufunc_eval_laguerre_types[4] = NPY_FLOAT +ufunc_eval_laguerre_types[5] = NPY_FLOAT +ufunc_eval_laguerre_types[6] = NPY_FLOAT +ufunc_eval_laguerre_types[7] = NPY_CFLOAT +ufunc_eval_laguerre_types[8] = NPY_CFLOAT +ufunc_eval_laguerre_types[9] = NPY_DOUBLE +ufunc_eval_laguerre_types[10] = NPY_DOUBLE +ufunc_eval_laguerre_types[11] = NPY_DOUBLE +ufunc_eval_laguerre_types[12] = NPY_DOUBLE +ufunc_eval_laguerre_types[13] = NPY_CDOUBLE +ufunc_eval_laguerre_types[14] = NPY_CDOUBLE +ufunc_eval_laguerre_ptr[2*0] = _func_eval_laguerre_l +ufunc_eval_laguerre_ptr[2*0+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*1] = _func_eval_laguerre[double] +ufunc_eval_laguerre_ptr[2*1+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*2] = _func_eval_laguerre[double_complex] +ufunc_eval_laguerre_ptr[2*2+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*3] = _func_eval_laguerre[double] +ufunc_eval_laguerre_ptr[2*3+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*4] = _func_eval_laguerre[double_complex] +ufunc_eval_laguerre_ptr[2*4+1] = ("eval_laguerre") +ufunc_eval_laguerre_data[0] = &ufunc_eval_laguerre_ptr[2*0] +ufunc_eval_laguerre_data[1] = &ufunc_eval_laguerre_ptr[2*1] +ufunc_eval_laguerre_data[2] = &ufunc_eval_laguerre_ptr[2*2] +ufunc_eval_laguerre_data[3] = &ufunc_eval_laguerre_ptr[2*3] +ufunc_eval_laguerre_data[4] = &ufunc_eval_laguerre_ptr[2*4] +eval_laguerre = np.PyUFunc_FromFuncAndData(ufunc_eval_laguerre_loops, ufunc_eval_laguerre_data, ufunc_eval_laguerre_types, 5, 2, 1, 0, "eval_laguerre", ufunc_eval_laguerre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_legendre_loops[5] +cdef void *ufunc_eval_legendre_ptr[10] +cdef void *ufunc_eval_legendre_data[5] +cdef char ufunc_eval_legendre_types[15] +cdef char *ufunc_eval_legendre_doc = ( + "eval_legendre(n, x, out=None)\n" + "\n" + "Evaluate Legendre polynomial at a point.\n" + "\n" + "The Legendre polynomials can be defined via the Gauss\n" + "hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. 
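+# Cross-check sketch for eval_laguerre against the closed form of the
+# degree-2 polynomial, L_2(x) = 1 - 2x + x**2/2:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_laguerre
+#     >>> x = np.linspace(0, 4, 5)
+#     >>> np.allclose(eval_laguerre(2, x), 1 - 2*x + 0.5*x**2)
+#     True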
See 22.5.49 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Legendre polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "P : scalar or ndarray\n" + " Values of the Legendre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_legendre : roots and quadrature weights of Legendre\n" + " polynomials\n" + "legendre : Legendre polynomial object\n" + "hyp2f1 : Gauss hypergeometric function\n" + "numpy.polynomial.legendre.Legendre : Legendre series\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import eval_legendre\n" + "\n" + "Evaluate the zero-order Legendre polynomial at x = 0\n" + "\n" + ">>> eval_legendre(0, 0)\n" + "1.0\n" + "\n" + "Evaluate the first-order Legendre polynomial between -1 and 1\n" + "\n" + ">>> X = np.linspace(-1, 1, 5) # Domain of Legendre polynomials\n" + ">>> eval_legendre(1, X)\n" + "array([-1. , -0.5, 0. , 0.5, 1. ])\n" + "\n" + "Evaluate Legendre polynomials of order 0 through 4 at x = 0\n" + "\n" + ">>> N = range(0, 5)\n" + ">>> eval_legendre(N, 0)\n" + "array([ 1. , 0. , -0.5 , 0. , 0.375])\n" + "\n" + "Plot Legendre polynomials of order 0 through 4\n" + "\n" + ">>> X = np.linspace(-1, 1)\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> for n in range(0, 5):\n" + "... y = eval_legendre(n, X)\n" + "... 
plt.plot(X, y, label=r'$P_{}(x)$'.format(n))\n" + "\n" + ">>> plt.title(\"Legendre Polynomials\")\n" + ">>> plt.xlabel(\"x\")\n" + ">>> plt.ylabel(r'$P_n(x)$')\n" + ">>> plt.legend(loc='lower right')\n" + ">>> plt.show()") +ufunc_eval_legendre_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_legendre_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_legendre_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_legendre_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_legendre_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_legendre_types[0] = NPY_INTP +ufunc_eval_legendre_types[1] = NPY_DOUBLE +ufunc_eval_legendre_types[2] = NPY_DOUBLE +ufunc_eval_legendre_types[3] = NPY_FLOAT +ufunc_eval_legendre_types[4] = NPY_FLOAT +ufunc_eval_legendre_types[5] = NPY_FLOAT +ufunc_eval_legendre_types[6] = NPY_FLOAT +ufunc_eval_legendre_types[7] = NPY_CFLOAT +ufunc_eval_legendre_types[8] = NPY_CFLOAT +ufunc_eval_legendre_types[9] = NPY_DOUBLE +ufunc_eval_legendre_types[10] = NPY_DOUBLE +ufunc_eval_legendre_types[11] = NPY_DOUBLE +ufunc_eval_legendre_types[12] = NPY_DOUBLE +ufunc_eval_legendre_types[13] = NPY_CDOUBLE +ufunc_eval_legendre_types[14] = NPY_CDOUBLE +ufunc_eval_legendre_ptr[2*0] = _func_eval_legendre_l +ufunc_eval_legendre_ptr[2*0+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*1] = _func_eval_legendre[double] +ufunc_eval_legendre_ptr[2*1+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*2] = _func_eval_legendre[double_complex] +ufunc_eval_legendre_ptr[2*2+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*3] = _func_eval_legendre[double] +ufunc_eval_legendre_ptr[2*3+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*4] = _func_eval_legendre[double_complex] +ufunc_eval_legendre_ptr[2*4+1] = ("eval_legendre") +ufunc_eval_legendre_data[0] = &ufunc_eval_legendre_ptr[2*0] +ufunc_eval_legendre_data[1] = &ufunc_eval_legendre_ptr[2*1] +ufunc_eval_legendre_data[2] = &ufunc_eval_legendre_ptr[2*2] +ufunc_eval_legendre_data[3] = &ufunc_eval_legendre_ptr[2*3] +ufunc_eval_legendre_data[4] = &ufunc_eval_legendre_ptr[2*4] +eval_legendre = np.PyUFunc_FromFuncAndData(ufunc_eval_legendre_loops, ufunc_eval_legendre_data, ufunc_eval_legendre_types, 5, 2, 1, 0, "eval_legendre", ufunc_eval_legendre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_chebyt_loops[5] +cdef void *ufunc_eval_sh_chebyt_ptr[10] +cdef void *ufunc_eval_sh_chebyt_data[5] +cdef char ufunc_eval_sh_chebyt_types[15] +cdef char *ufunc_eval_sh_chebyt_doc = ( + "eval_sh_chebyt(n, x, out=None)\n" + "\n" + "Evaluate shifted Chebyshev polynomial of the first kind at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " T_n^*(x) = T_n(2x - 1)\n" + "\n" + "where :math:`T_n` is a Chebyshev polynomial of the first kind. See\n" + "22.5.14 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
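+# Cross-check sketch for eval_legendre: together with roots_legendre, the
+# orthogonality relation (the integral of P_m * P_n over [-1, 1] vanishes for
+# m != n and equals 2/(2n + 1) for m == n) can be verified with
+# Gauss-Legendre quadrature, which is exact here:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_legendre, roots_legendre
+#     >>> x, w = roots_legendre(10)
+#     >>> bool(abs(w @ (eval_legendre(2, x) * eval_legendre(3, x))) < 1e-14)
+#     True
+#     >>> np.allclose(w @ eval_legendre(3, x)**2, 2 / 7)
+#     True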
If not an integer, the result is\n" + " determined via the relation to `eval_chebyt`.\n" + "x : array_like\n" + " Points at which to evaluate the shifted Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "T : scalar or ndarray\n" + " Values of the shifted Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_chebyt : roots and quadrature weights of shifted\n" + " Chebyshev polynomials of the first kind\n" + "sh_chebyt : shifted Chebyshev polynomial object\n" + "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n" + "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_chebyt_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_sh_chebyt_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_sh_chebyt_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_sh_chebyt_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_sh_chebyt_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_sh_chebyt_types[0] = NPY_INTP +ufunc_eval_sh_chebyt_types[1] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[2] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[3] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[4] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[5] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[6] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[7] = NPY_CFLOAT +ufunc_eval_sh_chebyt_types[8] = NPY_CFLOAT +ufunc_eval_sh_chebyt_types[9] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[10] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[11] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[12] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[13] = NPY_CDOUBLE +ufunc_eval_sh_chebyt_types[14] = NPY_CDOUBLE +ufunc_eval_sh_chebyt_ptr[2*0] = _func_eval_sh_chebyt_l +ufunc_eval_sh_chebyt_ptr[2*0+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*1] = _func_eval_sh_chebyt[double] +ufunc_eval_sh_chebyt_ptr[2*1+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*2] = _func_eval_sh_chebyt[double_complex] +ufunc_eval_sh_chebyt_ptr[2*2+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*3] = _func_eval_sh_chebyt[double] +ufunc_eval_sh_chebyt_ptr[2*3+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*4] = _func_eval_sh_chebyt[double_complex] +ufunc_eval_sh_chebyt_ptr[2*4+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_data[0] = &ufunc_eval_sh_chebyt_ptr[2*0] +ufunc_eval_sh_chebyt_data[1] = &ufunc_eval_sh_chebyt_ptr[2*1] +ufunc_eval_sh_chebyt_data[2] = &ufunc_eval_sh_chebyt_ptr[2*2] +ufunc_eval_sh_chebyt_data[3] = &ufunc_eval_sh_chebyt_ptr[2*3] +ufunc_eval_sh_chebyt_data[4] = &ufunc_eval_sh_chebyt_ptr[2*4] +eval_sh_chebyt = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_chebyt_loops, ufunc_eval_sh_chebyt_data, ufunc_eval_sh_chebyt_types, 5, 2, 1, 0, "eval_sh_chebyt", ufunc_eval_sh_chebyt_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_chebyu_loops[5] +cdef void *ufunc_eval_sh_chebyu_ptr[10] +cdef void *ufunc_eval_sh_chebyu_data[5] +cdef char ufunc_eval_sh_chebyu_types[15] +cdef char *ufunc_eval_sh_chebyu_doc = ( + "eval_sh_chebyu(n, x, out=None)\n" + "\n" + "Evaluate shifted Chebyshev polynomial of the second kind at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " U_n^*(x) = U_n(2x - 1)\n" + "\n" + "where :math:`U_n` is a Chebyshev polynomial of the second kind.
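+# Cross-check sketch for eval_sh_chebyt: the shift identity
+# T*_n(x) = T_n(2x - 1) maps [0, 1] onto the standard interval [-1, 1]:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_sh_chebyt, eval_chebyt
+#     >>> x = np.linspace(0, 1, 5)
+#     >>> np.allclose(eval_sh_chebyt(4, x), eval_chebyt(4, 2*x - 1))
+#     True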
See\n" + "22.5.15 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to `eval_chebyu`.\n" + "x : array_like\n" + " Points at which to evaluate the shifted Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "U : scalar or ndarray\n" + " Values of the shifted Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_chebyu : roots and quadrature weights of shifted\n" + " Chebychev polynomials of the second kind\n" + "sh_chebyu : shifted Chebyshev polynomial object\n" + "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_chebyu_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_sh_chebyu_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_sh_chebyu_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_sh_chebyu_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_sh_chebyu_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_sh_chebyu_types[0] = NPY_INTP +ufunc_eval_sh_chebyu_types[1] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[2] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[3] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[4] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[5] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[6] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[7] = NPY_CFLOAT +ufunc_eval_sh_chebyu_types[8] = NPY_CFLOAT +ufunc_eval_sh_chebyu_types[9] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[10] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[11] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[12] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[13] = NPY_CDOUBLE +ufunc_eval_sh_chebyu_types[14] = NPY_CDOUBLE +ufunc_eval_sh_chebyu_ptr[2*0] = _func_eval_sh_chebyu_l +ufunc_eval_sh_chebyu_ptr[2*0+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*1] = _func_eval_sh_chebyu[double] +ufunc_eval_sh_chebyu_ptr[2*1+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*2] = _func_eval_sh_chebyu[double_complex] +ufunc_eval_sh_chebyu_ptr[2*2+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*3] = _func_eval_sh_chebyu[double] +ufunc_eval_sh_chebyu_ptr[2*3+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*4] = _func_eval_sh_chebyu[double_complex] +ufunc_eval_sh_chebyu_ptr[2*4+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_data[0] = &ufunc_eval_sh_chebyu_ptr[2*0] +ufunc_eval_sh_chebyu_data[1] = &ufunc_eval_sh_chebyu_ptr[2*1] +ufunc_eval_sh_chebyu_data[2] = &ufunc_eval_sh_chebyu_ptr[2*2] +ufunc_eval_sh_chebyu_data[3] = &ufunc_eval_sh_chebyu_ptr[2*3] +ufunc_eval_sh_chebyu_data[4] = &ufunc_eval_sh_chebyu_ptr[2*4] +eval_sh_chebyu = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_chebyu_loops, ufunc_eval_sh_chebyu_data, ufunc_eval_sh_chebyu_types, 5, 2, 1, 0, "eval_sh_chebyu", ufunc_eval_sh_chebyu_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_jacobi_loops[5] +cdef void *ufunc_eval_sh_jacobi_ptr[10] +cdef void *ufunc_eval_sh_jacobi_data[5] +cdef char ufunc_eval_sh_jacobi_types[25] +cdef char *ufunc_eval_sh_jacobi_doc = ( + "eval_sh_jacobi(n, p, q, x, out=None)\n" + "\n" + "Evaluate shifted Jacobi polynomial at a point.\n" + "\n" + "Defined by\n" + "\n" + ".. 
math::\n" + "\n" + " G_n^{(p, q)}(x)\n" + " = \\binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),\n" + "\n" + "where :math:`P_n^{(\\cdot, \\cdot)}` is the n-th Jacobi\n" + "polynomial. See 22.5.2 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : int\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to `binom` and `eval_jacobi`.\n" + "p : float\n" + " Parameter\n" + "q : float\n" + " Parameter\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "G : scalar or ndarray\n" + " Values of the shifted Jacobi polynomial.\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_jacobi : roots and quadrature weights of shifted Jacobi\n" + " polynomials\n" + "sh_jacobi : shifted Jacobi polynomial object\n" + "eval_jacobi : evaluate Jacobi polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_jacobi_loops[0] = loop_d_pddd__As_pddd_d +ufunc_eval_sh_jacobi_loops[1] = loop_d_dddd__As_ffff_f +ufunc_eval_sh_jacobi_loops[2] = loop_D_dddD__As_fffF_F +ufunc_eval_sh_jacobi_loops[3] = loop_d_dddd__As_dddd_d +ufunc_eval_sh_jacobi_loops[4] = loop_D_dddD__As_dddD_D +ufunc_eval_sh_jacobi_types[0] = NPY_INTP +ufunc_eval_sh_jacobi_types[1] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[2] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[3] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[4] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[5] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[6] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[7] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[8] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[9] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[10] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[11] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[12] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[13] = NPY_CFLOAT +ufunc_eval_sh_jacobi_types[14] = NPY_CFLOAT +ufunc_eval_sh_jacobi_types[15] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[16] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[17] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[18] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[19] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[20] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[21] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[22] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[23] = NPY_CDOUBLE +ufunc_eval_sh_jacobi_types[24] = NPY_CDOUBLE +ufunc_eval_sh_jacobi_ptr[2*0] = _func_eval_sh_jacobi_l +ufunc_eval_sh_jacobi_ptr[2*0+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*1] = _func_eval_sh_jacobi[double] +ufunc_eval_sh_jacobi_ptr[2*1+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*2] = _func_eval_sh_jacobi[double_complex] +ufunc_eval_sh_jacobi_ptr[2*2+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*3] = _func_eval_sh_jacobi[double] +ufunc_eval_sh_jacobi_ptr[2*3+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*4] = _func_eval_sh_jacobi[double_complex] +ufunc_eval_sh_jacobi_ptr[2*4+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_data[0] = &ufunc_eval_sh_jacobi_ptr[2*0] +ufunc_eval_sh_jacobi_data[1] = &ufunc_eval_sh_jacobi_ptr[2*1] +ufunc_eval_sh_jacobi_data[2] = &ufunc_eval_sh_jacobi_ptr[2*2] +ufunc_eval_sh_jacobi_data[3] = &ufunc_eval_sh_jacobi_ptr[2*3] +ufunc_eval_sh_jacobi_data[4] = &ufunc_eval_sh_jacobi_ptr[2*4] +eval_sh_jacobi = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_jacobi_loops, ufunc_eval_sh_jacobi_data, ufunc_eval_sh_jacobi_types, 5, 4, 1, 0, 
"eval_sh_jacobi", ufunc_eval_sh_jacobi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_legendre_loops[5] +cdef void *ufunc_eval_sh_legendre_ptr[10] +cdef void *ufunc_eval_sh_legendre_data[5] +cdef char ufunc_eval_sh_legendre_types[15] +cdef char *ufunc_eval_sh_legendre_doc = ( + "eval_sh_legendre(n, x, out=None)\n" + "\n" + "Evaluate shifted Legendre polynomial at a point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " P_n^*(x) = P_n(2x - 1)\n" + "\n" + "where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_\n" + "for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the value is\n" + " determined via the relation to `eval_legendre`.\n" + "x : array_like\n" + " Points at which to evaluate the shifted Legendre polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "P : scalar or ndarray\n" + " Values of the shifted Legendre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_legendre : roots and quadrature weights of shifted\n" + " Legendre polynomials\n" + "sh_legendre : shifted Legendre polynomial object\n" + "eval_legendre : evaluate Legendre polynomials\n" + "numpy.polynomial.legendre.Legendre : Legendre series\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_legendre_loops[0] = loop_d_pd__As_pd_d +ufunc_eval_sh_legendre_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_sh_legendre_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_sh_legendre_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_sh_legendre_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_sh_legendre_types[0] = NPY_INTP +ufunc_eval_sh_legendre_types[1] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[2] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[3] = NPY_FLOAT +ufunc_eval_sh_legendre_types[4] = NPY_FLOAT +ufunc_eval_sh_legendre_types[5] = NPY_FLOAT +ufunc_eval_sh_legendre_types[6] = NPY_FLOAT +ufunc_eval_sh_legendre_types[7] = NPY_CFLOAT +ufunc_eval_sh_legendre_types[8] = NPY_CFLOAT +ufunc_eval_sh_legendre_types[9] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[10] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[11] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[12] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[13] = NPY_CDOUBLE +ufunc_eval_sh_legendre_types[14] = NPY_CDOUBLE +ufunc_eval_sh_legendre_ptr[2*0] = _func_eval_sh_legendre_l +ufunc_eval_sh_legendre_ptr[2*0+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*1] = _func_eval_sh_legendre[double] +ufunc_eval_sh_legendre_ptr[2*1+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*2] = _func_eval_sh_legendre[double_complex] +ufunc_eval_sh_legendre_ptr[2*2+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*3] = _func_eval_sh_legendre[double] +ufunc_eval_sh_legendre_ptr[2*3+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*4] = _func_eval_sh_legendre[double_complex] +ufunc_eval_sh_legendre_ptr[2*4+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_data[0] = &ufunc_eval_sh_legendre_ptr[2*0] +ufunc_eval_sh_legendre_data[1] = &ufunc_eval_sh_legendre_ptr[2*1] +ufunc_eval_sh_legendre_data[2] = &ufunc_eval_sh_legendre_ptr[2*2] +ufunc_eval_sh_legendre_data[3] = &ufunc_eval_sh_legendre_ptr[2*3] +ufunc_eval_sh_legendre_data[4] = &ufunc_eval_sh_legendre_ptr[2*4] +eval_sh_legendre = 
np.PyUFunc_FromFuncAndData(ufunc_eval_sh_legendre_loops, ufunc_eval_sh_legendre_data, ufunc_eval_sh_legendre_types, 5, 2, 1, 0, "eval_sh_legendre", ufunc_eval_sh_legendre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_exp10_loops[2] +cdef void *ufunc_exp10_ptr[4] +cdef void *ufunc_exp10_data[2] +cdef char ufunc_exp10_types[4] +cdef char *ufunc_exp10_doc = ( + "exp10(x, out=None)\n" + "\n" + "Compute ``10**x`` element-wise.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``10**x``, computed element-wise.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import exp10\n" + "\n" + ">>> exp10(3)\n" + "1000.0\n" + ">>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])\n" + ">>> exp10(x)\n" + "array([[ 0.1 , 0.31622777, 1. ],\n" + " [ 3.16227766, 10. , 31.6227766 ]])") +ufunc_exp10_loops[0] = loop_d_d__As_f_f +ufunc_exp10_loops[1] = loop_d_d__As_d_d +ufunc_exp10_types[0] = NPY_FLOAT +ufunc_exp10_types[1] = NPY_FLOAT +ufunc_exp10_types[2] = NPY_DOUBLE +ufunc_exp10_types[3] = NPY_DOUBLE +ufunc_exp10_ptr[2*0] = _func_cephes_exp10 +ufunc_exp10_ptr[2*0+1] = ("exp10") +ufunc_exp10_ptr[2*1] = _func_cephes_exp10 +ufunc_exp10_ptr[2*1+1] = ("exp10") +ufunc_exp10_data[0] = &ufunc_exp10_ptr[2*0] +ufunc_exp10_data[1] = &ufunc_exp10_ptr[2*1] +exp10 = np.PyUFunc_FromFuncAndData(ufunc_exp10_loops, ufunc_exp10_data, ufunc_exp10_types, 2, 1, 1, 0, "exp10", ufunc_exp10_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_exp2_loops[2] +cdef void *ufunc_exp2_ptr[4] +cdef void *ufunc_exp2_data[2] +cdef char ufunc_exp2_types[4] +cdef char *ufunc_exp2_doc = ( + "exp2(x, out=None)\n" + "\n" + "Compute ``2**x`` element-wise.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``2**x``, computed element-wise.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import exp2\n" + "\n" + ">>> exp2(3)\n" + "8.0\n" + ">>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])\n" + ">>> exp2(x)\n" + "array([[ 0.5 , 0.70710678, 1. ],\n" + " [ 1.41421356, 2. 
, 2.82842712]])") +ufunc_exp2_loops[0] = loop_d_d__As_f_f +ufunc_exp2_loops[1] = loop_d_d__As_d_d +ufunc_exp2_types[0] = NPY_FLOAT +ufunc_exp2_types[1] = NPY_FLOAT +ufunc_exp2_types[2] = NPY_DOUBLE +ufunc_exp2_types[3] = NPY_DOUBLE +ufunc_exp2_ptr[2*0] = _func_cephes_exp2 +ufunc_exp2_ptr[2*0+1] = ("exp2") +ufunc_exp2_ptr[2*1] = _func_cephes_exp2 +ufunc_exp2_ptr[2*1+1] = ("exp2") +ufunc_exp2_data[0] = &ufunc_exp2_ptr[2*0] +ufunc_exp2_data[1] = &ufunc_exp2_ptr[2*1] +exp2 = np.PyUFunc_FromFuncAndData(ufunc_exp2_loops, ufunc_exp2_data, ufunc_exp2_types, 2, 1, 1, 0, "exp2", ufunc_exp2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_expm1_loops[4] +cdef void *ufunc_expm1_ptr[8] +cdef void *ufunc_expm1_data[4] +cdef char ufunc_expm1_types[8] +cdef char *ufunc_expm1_doc = ( + "expm1(x, out=None)\n" + "\n" + "Compute ``exp(x) - 1``.\n" + "\n" + "When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation\n" + "of ``exp(x) - 1`` can suffer from catastrophic loss of precision.\n" + "``expm1(x)`` is implemented to avoid the loss of precision that occurs when\n" + "`x` is near zero.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``exp(x) - 1`` computed element-wise.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import expm1\n" + "\n" + ">>> expm1(1.0)\n" + "1.7182818284590451\n" + ">>> expm1([-0.2, -0.1, 0, 0.1, 0.2])\n" + "array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])\n" + "\n" + "The exact value of ``exp(7.5e-13) - 1`` is::\n" + "\n" + " 7.5000000000028125000000007031250000001318...*10**-13.\n" + "\n" + "Here is what ``expm1(7.5e-13)`` gives:\n" + "\n" + ">>> expm1(7.5e-13)\n" + "7.5000000000028135e-13\n" + "\n" + "Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in\n" + "a \"catastrophic\" loss of precision:\n" + "\n" + ">>> np.exp(7.5e-13) - 1\n" + "7.5006667543675576e-13") +ufunc_expm1_loops[0] = loop_d_d__As_f_f +ufunc_expm1_loops[1] = loop_d_d__As_d_d +ufunc_expm1_loops[2] = loop_D_D__As_F_F +ufunc_expm1_loops[3] = loop_D_D__As_D_D +ufunc_expm1_types[0] = NPY_FLOAT +ufunc_expm1_types[1] = NPY_FLOAT +ufunc_expm1_types[2] = NPY_DOUBLE +ufunc_expm1_types[3] = NPY_DOUBLE +ufunc_expm1_types[4] = NPY_CFLOAT +ufunc_expm1_types[5] = NPY_CFLOAT +ufunc_expm1_types[6] = NPY_CDOUBLE +ufunc_expm1_types[7] = NPY_CDOUBLE +ufunc_expm1_ptr[2*0] = _func_cephes_expm1 +ufunc_expm1_ptr[2*0+1] = ("expm1") +ufunc_expm1_ptr[2*1] = _func_cephes_expm1 +ufunc_expm1_ptr[2*1+1] = ("expm1") +ufunc_expm1_ptr[2*2] = _func_cexpm1 +ufunc_expm1_ptr[2*2+1] = ("expm1") +ufunc_expm1_ptr[2*3] = _func_cexpm1 +ufunc_expm1_ptr[2*3+1] = ("expm1") +ufunc_expm1_data[0] = &ufunc_expm1_ptr[2*0] +ufunc_expm1_data[1] = &ufunc_expm1_ptr[2*1] +ufunc_expm1_data[2] = &ufunc_expm1_ptr[2*2] +ufunc_expm1_data[3] = &ufunc_expm1_ptr[2*3] +expm1 = np.PyUFunc_FromFuncAndData(ufunc_expm1_loops, ufunc_expm1_data, ufunc_expm1_types, 4, 1, 1, 0, "expm1", ufunc_expm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_expn_loops[3] +cdef void *ufunc_expn_ptr[6] +cdef void *ufunc_expn_data[3] +cdef char ufunc_expn_types[9] +cdef char *ufunc_expn_doc = ( + "expn(n, x, out=None)\n" + "\n" + "Generalized exponential integral En.\n" + "\n" + "For integer :math:`n \\geq 0` and real :math:`x \\geq 0` the\n" + "generalized exponential integral is defined as [dlmf]_\n" + "\n" 
+ ".. math::\n" + "\n" + " E_n(x) = x^{n - 1} \\int_x^\\infty \\frac{e^{-t}}{t^n} dt.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Non-negative integers\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the generalized exponential integral\n" + "\n" + "See Also\n" + "--------\n" + "exp1 : special case of :math:`E_n` for :math:`n = 1`\n" + "expi : related to :math:`E_n` when :math:`n = 1`\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] Digital Library of Mathematical Functions, 8.19.2\n" + " https://dlmf.nist.gov/8.19#E2\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "Its domain is nonnegative n and x.\n" + "\n" + ">>> sc.expn(-1, 1.0), sc.expn(1, -1.0)\n" + "(nan, nan)\n" + "\n" + "It has a pole at ``x = 0`` for ``n = 1, 2``; for larger ``n`` it\n" + "is equal to ``1 / (n - 1)``.\n" + "\n" + ">>> sc.expn([0, 1, 2, 3, 4], 0)\n" + "array([ inf, inf, 1. , 0.5 , 0.33333333])\n" + "\n" + "For n equal to 0 it reduces to ``exp(-x) / x``.\n" + "\n" + ">>> x = np.array([1, 2, 3, 4])\n" + ">>> sc.expn(0, x)\n" + "array([0.36787944, 0.06766764, 0.01659569, 0.00457891])\n" + ">>> np.exp(-x) / x\n" + "array([0.36787944, 0.06766764, 0.01659569, 0.00457891])\n" + "\n" + "For n equal to 1 it reduces to `exp1`.\n" + "\n" + ">>> sc.expn(1, x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n" + ">>> sc.exp1(x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])") +ufunc_expn_loops[0] = loop_d_pd__As_pd_d +ufunc_expn_loops[1] = loop_d_dd__As_ff_f +ufunc_expn_loops[2] = loop_d_dd__As_dd_d +ufunc_expn_types[0] = NPY_INTP +ufunc_expn_types[1] = NPY_DOUBLE +ufunc_expn_types[2] = NPY_DOUBLE +ufunc_expn_types[3] = NPY_FLOAT +ufunc_expn_types[4] = NPY_FLOAT +ufunc_expn_types[5] = NPY_FLOAT +ufunc_expn_types[6] = NPY_DOUBLE +ufunc_expn_types[7] = NPY_DOUBLE +ufunc_expn_types[8] = NPY_DOUBLE +ufunc_expn_ptr[2*0] = _func_cephes_expn_wrap +ufunc_expn_ptr[2*0+1] = ("expn") +ufunc_expn_ptr[2*1] = _func_expn_unsafe +ufunc_expn_ptr[2*1+1] = ("expn") +ufunc_expn_ptr[2*2] = _func_expn_unsafe +ufunc_expn_ptr[2*2+1] = ("expn") +ufunc_expn_data[0] = &ufunc_expn_ptr[2*0] +ufunc_expn_data[1] = &ufunc_expn_ptr[2*1] +ufunc_expn_data[2] = &ufunc_expn_ptr[2*2] +expn = np.PyUFunc_FromFuncAndData(ufunc_expn_loops, ufunc_expn_data, ufunc_expn_types, 3, 2, 1, 0, "expn", ufunc_expn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtr_loops[2] +cdef void *ufunc_fdtr_ptr[4] +cdef void *ufunc_fdtr_data[2] +cdef char ufunc_fdtr_types[8] +cdef char *ufunc_fdtr_doc = ( + "fdtr(dfn, dfd, x, out=None)\n" + "\n" + "F cumulative distribution function.\n" + "\n" + "Returns the value of the cumulative distribution function of the\n" + "F-distribution, also known as Snedecor's F-distribution or the\n" + "Fisher-Snedecor distribution.\n" + "\n" + "The F-distribution with parameters :math:`d_n` and :math:`d_d` is the\n" + "distribution of the random variable,\n" + "\n" + ".. 
math::\n" + " X = \\frac{U_n/d_n}{U_d/d_d},\n" + "\n" + "where :math:`U_n` and :math:`U_d` are random variables distributed\n" + ":math:`\\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,\n" + "respectively.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "dfd : array_like\n" + " Second parameter (positive float).\n" + "x : array_like\n" + " Argument (nonnegative float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "fdtrc : F distribution survival function\n" + "fdtri : F distribution inverse cumulative distribution\n" + "scipy.stats.f : F distribution\n" + "\n" + "Notes\n" + "-----\n" + "The regularized incomplete beta function is used, according to the\n" + "formula,\n" + "\n" + ".. math::\n" + " F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also\n" + "available as `scipy.stats.f`. Calling `fdtr` directly can improve\n" + "performance compared to the ``cdf`` method of `scipy.stats.f` (see last\n" + "example below).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import fdtr\n" + ">>> fdtr(1, 2, 1)\n" + "0.5773502691896258\n" + "\n" + "Calculate the function at several points by providing a NumPy array for\n" + "`x`.\n" + "\n" + ">>> x = np.array([0.5, 2., 3.])\n" + ">>> fdtr(1, 2, x)\n" + "array([0.4472136 , 0.70710678, 0.77459667])\n" + "\n" + "Plot the function for several parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> dfn_parameters = [1, 5, 10, 50]\n" + ">>> dfd_parameters = [1, 1, 2, 3]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n" + "... linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... dfn, dfd, style = parameter_set\n" + "... fdtr_vals = fdtr(dfn, dfd, x)\n" + "... ax.plot(x, fdtr_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"F distribution cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. 
Using `fdtr`\n" + "directly can be much faster than calling the ``cdf`` method of\n" + "`scipy.stats.f`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``.\n" + "\n" + ">>> from scipy.stats import f\n" + ">>> dfn, dfd = 1, 2\n" + ">>> x = 1\n" + ">>> fdtr_res = fdtr(dfn, dfd, x) # this will often be faster than below\n" + ">>> f_dist_res = f(dfn, dfd).cdf(x)\n" + ">>> fdtr_res == f_dist_res # test that results are equal\n" + "True") +ufunc_fdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtr_types[0] = NPY_FLOAT +ufunc_fdtr_types[1] = NPY_FLOAT +ufunc_fdtr_types[2] = NPY_FLOAT +ufunc_fdtr_types[3] = NPY_FLOAT +ufunc_fdtr_types[4] = NPY_DOUBLE +ufunc_fdtr_types[5] = NPY_DOUBLE +ufunc_fdtr_types[6] = NPY_DOUBLE +ufunc_fdtr_types[7] = NPY_DOUBLE +ufunc_fdtr_ptr[2*0] = _func_cephes_fdtr +ufunc_fdtr_ptr[2*0+1] = ("fdtr") +ufunc_fdtr_ptr[2*1] = _func_cephes_fdtr +ufunc_fdtr_ptr[2*1+1] = ("fdtr") +ufunc_fdtr_data[0] = &ufunc_fdtr_ptr[2*0] +ufunc_fdtr_data[1] = &ufunc_fdtr_ptr[2*1] +fdtr = np.PyUFunc_FromFuncAndData(ufunc_fdtr_loops, ufunc_fdtr_data, ufunc_fdtr_types, 2, 3, 1, 0, "fdtr", ufunc_fdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtrc_loops[2] +cdef void *ufunc_fdtrc_ptr[4] +cdef void *ufunc_fdtrc_data[2] +cdef char ufunc_fdtrc_types[8] +cdef char *ufunc_fdtrc_doc = ( + "fdtrc(dfn, dfd, x, out=None)\n" + "\n" + "F survival function.\n" + "\n" + "Returns the complemented F-distribution function (the integral of the\n" + "density from `x` to infinity).\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "dfd : array_like\n" + " Second parameter (positive float).\n" + "x : array_like\n" + " Argument (nonnegative float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " The complemented F-distribution function with parameters `dfn` and\n" + " `dfd` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "fdtr : F distribution cumulative distribution function\n" + "fdtri : F distribution inverse cumulative distribution function\n" + "scipy.stats.f : F distribution\n" + "\n" + "Notes\n" + "-----\n" + "The regularized incomplete beta function is used, according to the\n" + "formula,\n" + "\n" + ".. math::\n" + " F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also\n" + "available as `scipy.stats.f`. Calling `fdtrc` directly can improve\n" + "performance compared to the ``sf`` method of `scipy.stats.f` (see last\n" + "example below).\n" + "\n" + "References\n" + "----------\n" + ".. 
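+# Cross-check sketch for fdtr: the Notes express the CDF through the
+# regularized incomplete beta function, exposed in scipy.special as betainc
+# (the parameter values here are arbitrary examples):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import fdtr, betainc
+#     >>> dfn, dfd, x = 5.0, 7.0, 2.0
+#     >>> np.allclose(fdtr(dfn, dfd, x),
+#     ...             betainc(dfn/2, dfd/2, dfn*x / (dfd + dfn*x)))
+#     True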
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import fdtrc\n" + ">>> fdtrc(1, 2, 1)\n" + "0.42264973081037427\n" + "\n" + "Calculate the function at several points by providing a NumPy array for\n" + "`x`.\n" + "\n" + ">>> x = np.array([0.5, 2., 3.])\n" + ">>> fdtrc(1, 2, x)\n" + "array([0.5527864 , 0.29289322, 0.22540333])\n" + "\n" + "Plot the function for several parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> dfn_parameters = [1, 5, 10, 50]\n" + ">>> dfd_parameters = [1, 1, 2, 3]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n" + "... linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... dfn, dfd, style = parameter_set\n" + "... fdtrc_vals = fdtrc(dfn, dfd, x)\n" + "... ax.plot(x, fdtrc_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"F distribution survival function\")\n" + ">>> plt.show()\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. Using `fdtrc`\n" + "directly can be much faster than calling the ``sf`` method of\n" + "`scipy.stats.f`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``.\n" + "\n" + ">>> from scipy.stats import f\n" + ">>> dfn, dfd = 1, 2\n" + ">>> x = 1\n" + ">>> fdtrc_res = fdtrc(dfn, dfd, x) # this will often be faster than below\n" + ">>> f_dist_res = f(dfn, dfd).sf(x)\n" + ">>> f_dist_res == fdtrc_res # test that results are equal\n" + "True") +ufunc_fdtrc_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtrc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtrc_types[0] = NPY_FLOAT +ufunc_fdtrc_types[1] = NPY_FLOAT +ufunc_fdtrc_types[2] = NPY_FLOAT +ufunc_fdtrc_types[3] = NPY_FLOAT +ufunc_fdtrc_types[4] = NPY_DOUBLE +ufunc_fdtrc_types[5] = NPY_DOUBLE +ufunc_fdtrc_types[6] = NPY_DOUBLE +ufunc_fdtrc_types[7] = NPY_DOUBLE +ufunc_fdtrc_ptr[2*0] = _func_cephes_fdtrc +ufunc_fdtrc_ptr[2*0+1] = ("fdtrc") +ufunc_fdtrc_ptr[2*1] = _func_cephes_fdtrc +ufunc_fdtrc_ptr[2*1+1] = ("fdtrc") +ufunc_fdtrc_data[0] = &ufunc_fdtrc_ptr[2*0] +ufunc_fdtrc_data[1] = &ufunc_fdtrc_ptr[2*1] +fdtrc = np.PyUFunc_FromFuncAndData(ufunc_fdtrc_loops, ufunc_fdtrc_data, ufunc_fdtrc_types, 2, 3, 1, 0, "fdtrc", ufunc_fdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtri_loops[2] +cdef void *ufunc_fdtri_ptr[4] +cdef void *ufunc_fdtri_data[2] +cdef char ufunc_fdtri_types[8] +cdef char *ufunc_fdtri_doc = ( + "fdtri(dfn, dfd, p, out=None)\n" + "\n" + "The `p`-th quantile of the F-distribution.\n" + "\n" + "This function is the inverse of the F-distribution CDF, `fdtr`, returning\n" + "the `x` such that `fdtr(dfn, dfd, x) = p`.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "dfd : array_like\n" + " Second parameter (positive float).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " The quantile corresponding to `p`.\n" + "\n" + "See Also\n" + 
"--------\n" + "fdtr : F distribution cumulative distribution function\n" + "fdtrc : F distribution survival function\n" + "scipy.stats.f : F distribution\n" + "\n" + "Notes\n" + "-----\n" + "The computation is carried out using the relation to the inverse\n" + "regularized beta function, :math:`I^{-1}_x(a, b)`. Let\n" + ":math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,\n" + "\n" + ".. math::\n" + " x = \\frac{d_d (1 - z)}{d_n z}.\n" + "\n" + "If `p` is such that :math:`x < 0.5`, the following relation is used\n" + "instead for improved stability: let\n" + ":math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,\n" + "\n" + ".. math::\n" + " x = \\frac{d_d z'}{d_n (1 - z')}.\n" + "\n" + "Wrapper for the Cephes [1]_ routine `fdtri`.\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. Calling\n" + "`fdtri` directly can improve performance compared to the ``ppf``\n" + "method of `scipy.stats.f` (see last example below).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "`fdtri` represents the inverse of the F distribution CDF which is\n" + "available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2``\n" + "at ``x=3``. `fdtri` then returns ``3`` given the same values for `df1`,\n" + "`df2` and the computed CDF value.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import fdtri, fdtr\n" + ">>> df1, df2 = 1, 2\n" + ">>> x = 3\n" + ">>> cdf_value = fdtr(df1, df2, x)\n" + ">>> fdtri(df1, df2, cdf_value)\n" + "3.000000000000006\n" + "\n" + "Calculate the function at several points by providing a NumPy array for\n" + "`x`.\n" + "\n" + ">>> x = np.array([0.1, 0.4, 0.7])\n" + ">>> fdtri(1, 2, x)\n" + "array([0.02020202, 0.38095238, 1.92156863])\n" + "\n" + "Plot the function for several parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> dfn_parameters = [50, 10, 1, 50]\n" + ">>> dfd_parameters = [0.5, 1, 1, 5]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n" + "... linestyles))\n" + ">>> x = np.linspace(0, 1, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... dfn, dfd, style = parameter_set\n" + "... fdtri_vals = fdtri(dfn, dfd, x)\n" + "... ax.plot(x, fdtri_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> title = \"F distribution inverse cumulative distribution function\"\n" + ">>> ax.set_title(title)\n" + ">>> ax.set_ylim(0, 30)\n" + ">>> plt.show()\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. 
Using `fdtri`\n" + "directly can be much faster than calling the ``ppf`` method of\n" + "`scipy.stats.f`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``.\n" + "\n" + ">>> from scipy.stats import f\n" + ">>> dfn, dfd = 1, 2\n" + ">>> x = 0.7\n" + ">>> fdtri_res = fdtri(dfn, dfd, x) # this will often be faster than below\n" + ">>> f_dist_res = f(dfn, dfd).ppf(x)\n" + ">>> f_dist_res == fdtri_res # test that results are equal\n" + "True") +ufunc_fdtri_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtri_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtri_types[0] = NPY_FLOAT +ufunc_fdtri_types[1] = NPY_FLOAT +ufunc_fdtri_types[2] = NPY_FLOAT +ufunc_fdtri_types[3] = NPY_FLOAT +ufunc_fdtri_types[4] = NPY_DOUBLE +ufunc_fdtri_types[5] = NPY_DOUBLE +ufunc_fdtri_types[6] = NPY_DOUBLE +ufunc_fdtri_types[7] = NPY_DOUBLE +ufunc_fdtri_ptr[2*0] = _func_cephes_fdtri +ufunc_fdtri_ptr[2*0+1] = ("fdtri") +ufunc_fdtri_ptr[2*1] = _func_cephes_fdtri +ufunc_fdtri_ptr[2*1+1] = ("fdtri") +ufunc_fdtri_data[0] = &ufunc_fdtri_ptr[2*0] +ufunc_fdtri_data[1] = &ufunc_fdtri_ptr[2*1] +fdtri = np.PyUFunc_FromFuncAndData(ufunc_fdtri_loops, ufunc_fdtri_data, ufunc_fdtri_types, 2, 3, 1, 0, "fdtri", ufunc_fdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtridfd_loops[2] +cdef void *ufunc_fdtridfd_ptr[4] +cdef void *ufunc_fdtridfd_data[2] +cdef char ufunc_fdtridfd_types[8] +cdef char *ufunc_fdtridfd_doc = ( + "fdtridfd(dfn, p, x, out=None)\n" + "\n" + "Inverse to `fdtr` vs dfd\n" + "\n" + "Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "x : array_like\n" + " Argument (nonnegative float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "dfd : scalar or ndarray\n" + " `dfd` such that ``fdtr(dfn, dfd, x) == p``.\n" + "\n" + "See Also\n" + "--------\n" + "fdtr : F distribution cumulative distribution function\n" + "fdtrc : F distribution survival function\n" + "fdtri : F distribution quantile function\n" + "scipy.stats.f : F distribution\n" + "\n" + "Examples\n" + "--------\n" + "Compute the F distribution cumulative distribution function for one\n" + "parameter set.\n" + "\n" + ">>> from scipy.special import fdtridfd, fdtr\n" + ">>> dfn, dfd, x = 10, 5, 2\n" + ">>> cdf_value = fdtr(dfn, dfd, x)\n" + ">>> cdf_value\n" + "0.7700248806501017\n" + "\n" + "Verify that `fdtridfd` recovers the original value for `dfd`:\n" + "\n" + ">>> fdtridfd(dfn, cdf_value, x)\n" + "5.0") +ufunc_fdtridfd_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtridfd_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtridfd_types[0] = NPY_FLOAT +ufunc_fdtridfd_types[1] = NPY_FLOAT +ufunc_fdtridfd_types[2] = NPY_FLOAT +ufunc_fdtridfd_types[3] = NPY_FLOAT +ufunc_fdtridfd_types[4] = NPY_DOUBLE +ufunc_fdtridfd_types[5] = NPY_DOUBLE +ufunc_fdtridfd_types[6] = NPY_DOUBLE +ufunc_fdtridfd_types[7] = NPY_DOUBLE +ufunc_fdtridfd_ptr[2*0] = _func_fdtridfd +ufunc_fdtridfd_ptr[2*0+1] = ("fdtridfd") +ufunc_fdtridfd_ptr[2*1] = _func_fdtridfd +ufunc_fdtridfd_ptr[2*1+1] = ("fdtridfd") +ufunc_fdtridfd_data[0] = &ufunc_fdtridfd_ptr[2*0] +ufunc_fdtridfd_data[1] = &ufunc_fdtridfd_ptr[2*1] +fdtridfd = np.PyUFunc_FromFuncAndData(ufunc_fdtridfd_loops, ufunc_fdtridfd_data, ufunc_fdtridfd_types, 2, 3, 1, 0, 
"fdtridfd", ufunc_fdtridfd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fresnel_loops[4] +cdef void *ufunc_fresnel_ptr[8] +cdef void *ufunc_fresnel_data[4] +cdef char ufunc_fresnel_types[12] +cdef char *ufunc_fresnel_doc = ( + "fresnel(z, out=None)\n" + "\n" + "Fresnel integrals.\n" + "\n" + "The Fresnel integrals are defined as\n" + "\n" + ".. math::\n" + "\n" + " S(z) &= \\int_0^z \\sin(\\pi t^2 /2) dt \\\\\n" + " C(z) &= \\int_0^z \\cos(\\pi t^2 /2) dt.\n" + "\n" + "See [dlmf]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex valued argument\n" + "out : 2-tuple of ndarrays, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "S, C : 2-tuple of scalar or ndarray\n" + " Values of the Fresnel integrals\n" + "\n" + "See Also\n" + "--------\n" + "fresnel_zeros : zeros of the Fresnel integrals\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/7.2#iii\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "As z goes to infinity along the real axis, S and C converge to 0.5.\n" + "\n" + ">>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])\n" + ">>> S\n" + "array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ])\n" + ">>> C\n" + "array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ])\n" + "\n" + "They are related to the error function `erf`.\n" + "\n" + ">>> z = np.array([1, 2, 3, 4])\n" + ">>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z\n" + ">>> S, C = sc.fresnel(z)\n" + ">>> C + 1j*S\n" + "array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,\n" + " 0.60572079+0.496313j , 0.49842603+0.42051575j])\n" + ">>> 0.5 * (1 + 1j) * sc.erf(zeta)\n" + "array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,\n" + " 0.60572079+0.496313j , 0.49842603+0.42051575j])") +ufunc_fresnel_loops[0] = loop_i_d_dd_As_f_ff +ufunc_fresnel_loops[1] = loop_i_d_dd_As_d_dd +ufunc_fresnel_loops[2] = loop_i_D_DD_As_F_FF +ufunc_fresnel_loops[3] = loop_i_D_DD_As_D_DD +ufunc_fresnel_types[0] = NPY_FLOAT +ufunc_fresnel_types[1] = NPY_FLOAT +ufunc_fresnel_types[2] = NPY_FLOAT +ufunc_fresnel_types[3] = NPY_DOUBLE +ufunc_fresnel_types[4] = NPY_DOUBLE +ufunc_fresnel_types[5] = NPY_DOUBLE +ufunc_fresnel_types[6] = NPY_CFLOAT +ufunc_fresnel_types[7] = NPY_CFLOAT +ufunc_fresnel_types[8] = NPY_CFLOAT +ufunc_fresnel_types[9] = NPY_CDOUBLE +ufunc_fresnel_types[10] = NPY_CDOUBLE +ufunc_fresnel_types[11] = NPY_CDOUBLE +ufunc_fresnel_ptr[2*0] = _func_cephes_fresnl_wrap +ufunc_fresnel_ptr[2*0+1] = ("fresnel") +ufunc_fresnel_ptr[2*1] = _func_cephes_fresnl_wrap +ufunc_fresnel_ptr[2*1+1] = ("fresnel") +ufunc_fresnel_ptr[2*2] = _func_cfresnl_wrap +ufunc_fresnel_ptr[2*2+1] = ("fresnel") +ufunc_fresnel_ptr[2*3] = _func_cfresnl_wrap +ufunc_fresnel_ptr[2*3+1] = ("fresnel") +ufunc_fresnel_data[0] = &ufunc_fresnel_ptr[2*0] +ufunc_fresnel_data[1] = &ufunc_fresnel_ptr[2*1] +ufunc_fresnel_data[2] = &ufunc_fresnel_ptr[2*2] +ufunc_fresnel_data[3] = &ufunc_fresnel_ptr[2*3] +fresnel = np.PyUFunc_FromFuncAndData(ufunc_fresnel_loops, ufunc_fresnel_data, ufunc_fresnel_types, 4, 1, 2, 0, "fresnel", ufunc_fresnel_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammainc_loops[2] +cdef void *ufunc_gammainc_ptr[4] +cdef void *ufunc_gammainc_data[2] +cdef char ufunc_gammainc_types[6] +cdef char *ufunc_gammainc_doc = ( + "gammainc(a, x, out=None)\n" + "\n" + "Regularized lower incomplete gamma function.\n" + "\n" + "It 
is defined as\n" + "\n" + ".. math::\n" + "\n" + " P(a, x) = \\frac{1}{\\Gamma(a)} \\int_0^x t^{a - 1}e^{-t} dt\n" + "\n" + "for :math:`a > 0` and :math:`x \\geq 0`. See [dlmf]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "x : array_like\n" + " Nonnegative argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the lower incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammaincc : regularized upper incomplete gamma function\n" + "gammaincinv : inverse of the regularized lower incomplete gamma function\n" + "gammainccinv : inverse of the regularized upper incomplete gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The function satisfies the relation ``gammainc(a, x) +\n" + "gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper\n" + "incomplete gamma function.\n" + "\n" + "The implementation largely follows that of [boost]_.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + ".. [boost] Maddock et. al., \"Incomplete Gamma Functions\",\n" + " https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is the CDF of the gamma distribution, so it starts at 0 and\n" + "monotonically increases to 1.\n" + "\n" + ">>> sc.gammainc(0.5, [0, 1, 10, 100])\n" + "array([0. , 0.84270079, 0.99999226, 1. ])\n" + "\n" + "It is equal to one minus the upper incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, 0.4\n" + ">>> sc.gammainc(a, x)\n" + "0.6289066304773024\n" + ">>> 1 - sc.gammaincc(a, x)\n" + "0.6289066304773024") +ufunc_gammainc_loops[0] = loop_d_dd__As_ff_f +ufunc_gammainc_loops[1] = loop_d_dd__As_dd_d +ufunc_gammainc_types[0] = NPY_FLOAT +ufunc_gammainc_types[1] = NPY_FLOAT +ufunc_gammainc_types[2] = NPY_FLOAT +ufunc_gammainc_types[3] = NPY_DOUBLE +ufunc_gammainc_types[4] = NPY_DOUBLE +ufunc_gammainc_types[5] = NPY_DOUBLE +ufunc_gammainc_ptr[2*0] = _func_cephes_igam +ufunc_gammainc_ptr[2*0+1] = ("gammainc") +ufunc_gammainc_ptr[2*1] = _func_cephes_igam +ufunc_gammainc_ptr[2*1+1] = ("gammainc") +ufunc_gammainc_data[0] = &ufunc_gammainc_ptr[2*0] +ufunc_gammainc_data[1] = &ufunc_gammainc_ptr[2*1] +gammainc = np.PyUFunc_FromFuncAndData(ufunc_gammainc_loops, ufunc_gammainc_data, ufunc_gammainc_types, 2, 2, 1, 0, "gammainc", ufunc_gammainc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammaincc_loops[2] +cdef void *ufunc_gammaincc_ptr[4] +cdef void *ufunc_gammaincc_data[2] +cdef char ufunc_gammaincc_types[6] +cdef char *ufunc_gammaincc_doc = ( + "gammaincc(a, x, out=None)\n" + "\n" + "Regularized upper incomplete gamma function.\n" + "\n" + "It is defined as\n" + "\n" + ".. math::\n" + "\n" + " Q(a, x) = \\frac{1}{\\Gamma(a)} \\int_x^\\infty t^{a - 1}e^{-t} dt\n" + "\n" + "for :math:`a > 0` and :math:`x \\geq 0`. 
See [dlmf]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "x : array_like\n" + " Nonnegative argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the upper incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammainc : regularized lower incomplete gamma function\n" + "gammaincinv : inverse of the regularized lower incomplete gamma function\n" + "gammainccinv : inverse of the regularized upper incomplete gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The function satisfies the relation ``gammainc(a, x) +\n" + "gammaincc(a, x) = 1`` where `gammainc` is the regularized lower\n" + "incomplete gamma function.\n" + "\n" + "The implementation largely follows that of [boost]_.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + ".. [boost] Maddock et al., \"Incomplete Gamma Functions\",\n" + " https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is the survival function of the gamma distribution, so it\n" + "starts at 1 and monotonically decreases to 0.\n" + "\n" + ">>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])\n" + "array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,\n" + " 0.00000000e+00])\n" + "\n" + "It is equal to one minus the lower incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, 0.4\n" + ">>> sc.gammaincc(a, x)\n" + "0.37109336952269756\n" + ">>> 1 - sc.gammainc(a, x)\n" + "0.37109336952269756") +ufunc_gammaincc_loops[0] = loop_d_dd__As_ff_f +ufunc_gammaincc_loops[1] = loop_d_dd__As_dd_d +ufunc_gammaincc_types[0] = NPY_FLOAT +ufunc_gammaincc_types[1] = NPY_FLOAT +ufunc_gammaincc_types[2] = NPY_FLOAT +ufunc_gammaincc_types[3] = NPY_DOUBLE +ufunc_gammaincc_types[4] = NPY_DOUBLE +ufunc_gammaincc_types[5] = NPY_DOUBLE +ufunc_gammaincc_ptr[2*0] = _func_cephes_igamc +ufunc_gammaincc_ptr[2*0+1] = ("gammaincc") +ufunc_gammaincc_ptr[2*1] = _func_cephes_igamc +ufunc_gammaincc_ptr[2*1+1] = ("gammaincc") +ufunc_gammaincc_data[0] = &ufunc_gammaincc_ptr[2*0] +ufunc_gammaincc_data[1] = &ufunc_gammaincc_ptr[2*1] +gammaincc = np.PyUFunc_FromFuncAndData(ufunc_gammaincc_loops, ufunc_gammaincc_data, ufunc_gammaincc_types, 2, 2, 1, 0, "gammaincc", ufunc_gammaincc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammainccinv_loops[2] +cdef void *ufunc_gammainccinv_ptr[4] +cdef void *ufunc_gammainccinv_data[2] +cdef char ufunc_gammainccinv_types[6] +cdef char *ufunc_gammainccinv_doc = ( + "gammainccinv(a, y, out=None)\n" + "\n" + "Inverse of the regularized upper incomplete gamma function.\n" + "\n" + "Given an input :math:`y` between 0 and 1, returns :math:`x` such\n" + "that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper\n" + "incomplete gamma function; see `gammaincc`. 
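For example (an illustrative sketch, not part of the upstream\n" + "docstring), for ``a = 1`` one has ``Q(1, x) = exp(-x)``, so the\n" + "inverse is ``-log(y)``:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import gammainccinv\n" + ">>> bool(np.isclose(gammainccinv(1.0, 0.5), -np.log(0.5)))\n" + "True\n" + "\n" + "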
This is well-defined\n" + "because the upper incomplete gamma function is monotonic as can\n" + "be seen from its definition in [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "y : array_like\n" + " Argument between 0 and 1, inclusive\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the inverse of the upper incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammaincc : regularized upper incomplete gamma function\n" + "gammainc : regularized lower incomplete gamma function\n" + "gammaincinv : inverse of the regularized lower incomplete gamma function\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It starts at infinity and monotonically decreases to 0.\n" + "\n" + ">>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])\n" + "array([ inf, 1.35277173, 0.22746821, 0. ])\n" + "\n" + "It inverts the upper incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, [0, 0.1, 0.5, 1]\n" + ">>> sc.gammaincc(a, sc.gammainccinv(a, x))\n" + "array([0. , 0.1, 0.5, 1. ])\n" + "\n" + ">>> a, x = 0.5, [0, 10, 50]\n" + ">>> sc.gammainccinv(a, sc.gammaincc(a, x))\n" + "array([ 0., 10., 50.])") +ufunc_gammainccinv_loops[0] = loop_d_dd__As_ff_f +ufunc_gammainccinv_loops[1] = loop_d_dd__As_dd_d +ufunc_gammainccinv_types[0] = NPY_FLOAT +ufunc_gammainccinv_types[1] = NPY_FLOAT +ufunc_gammainccinv_types[2] = NPY_FLOAT +ufunc_gammainccinv_types[3] = NPY_DOUBLE +ufunc_gammainccinv_types[4] = NPY_DOUBLE +ufunc_gammainccinv_types[5] = NPY_DOUBLE +ufunc_gammainccinv_ptr[2*0] = _func_cephes_igamci +ufunc_gammainccinv_ptr[2*0+1] = ("gammainccinv") +ufunc_gammainccinv_ptr[2*1] = _func_cephes_igamci +ufunc_gammainccinv_ptr[2*1+1] = ("gammainccinv") +ufunc_gammainccinv_data[0] = &ufunc_gammainccinv_ptr[2*0] +ufunc_gammainccinv_data[1] = &ufunc_gammainccinv_ptr[2*1] +gammainccinv = np.PyUFunc_FromFuncAndData(ufunc_gammainccinv_loops, ufunc_gammainccinv_data, ufunc_gammainccinv_types, 2, 2, 1, 0, "gammainccinv", ufunc_gammainccinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammaincinv_loops[2] +cdef void *ufunc_gammaincinv_ptr[4] +cdef void *ufunc_gammaincinv_data[2] +cdef char ufunc_gammaincinv_types[6] +cdef char *ufunc_gammaincinv_doc = ( + "gammaincinv(a, y, out=None)\n" + "\n" + "Inverse to the regularized lower incomplete gamma function.\n" + "\n" + "Given an input :math:`y` between 0 and 1, returns :math:`x` such\n" + "that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower\n" + "incomplete gamma function; see `gammainc`. 
This is well-defined\n" + "because the lower incomplete gamma function is monotonic as can be\n" + "seen from its definition in [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "y : array_like\n" + " Parameter between 0 and 1, inclusive\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the inverse of the lower incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammainc : regularized lower incomplete gamma function\n" + "gammaincc : regularized upper incomplete gamma function\n" + "gammainccinv : inverse of the regularized upper incomplete gamma function\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It starts at 0 and monotonically increases to infinity.\n" + "\n" + ">>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1])\n" + "array([0. , 0.00789539, 0.22746821, inf])\n" + "\n" + "It inverts the lower incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, [0, 0.1, 0.5, 1]\n" + ">>> sc.gammainc(a, sc.gammaincinv(a, x))\n" + "array([0. , 0.1, 0.5, 1. ])\n" + "\n" + ">>> a, x = 0.5, [0, 10, 25]\n" + ">>> sc.gammaincinv(a, sc.gammainc(a, x))\n" + "array([ 0. , 10. , 25.00001465])") +ufunc_gammaincinv_loops[0] = loop_d_dd__As_ff_f +ufunc_gammaincinv_loops[1] = loop_d_dd__As_dd_d +ufunc_gammaincinv_types[0] = NPY_FLOAT +ufunc_gammaincinv_types[1] = NPY_FLOAT +ufunc_gammaincinv_types[2] = NPY_FLOAT +ufunc_gammaincinv_types[3] = NPY_DOUBLE +ufunc_gammaincinv_types[4] = NPY_DOUBLE +ufunc_gammaincinv_types[5] = NPY_DOUBLE +ufunc_gammaincinv_ptr[2*0] = _func_cephes_igami +ufunc_gammaincinv_ptr[2*0+1] = ("gammaincinv") +ufunc_gammaincinv_ptr[2*1] = _func_cephes_igami +ufunc_gammaincinv_ptr[2*1+1] = ("gammaincinv") +ufunc_gammaincinv_data[0] = &ufunc_gammaincinv_ptr[2*0] +ufunc_gammaincinv_data[1] = &ufunc_gammaincinv_ptr[2*1] +gammaincinv = np.PyUFunc_FromFuncAndData(ufunc_gammaincinv_loops, ufunc_gammaincinv_data, ufunc_gammaincinv_types, 2, 2, 1, 0, "gammaincinv", ufunc_gammaincinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammasgn_loops[2] +cdef void *ufunc_gammasgn_ptr[4] +cdef void *ufunc_gammasgn_data[2] +cdef char ufunc_gammasgn_types[4] +cdef char *ufunc_gammasgn_doc = ( + "gammasgn(x, out=None)\n" + "\n" + "Sign of the gamma function.\n" + "\n" + "It is defined as\n" + "\n" + ".. math::\n" + "\n" + " \\text{gammasgn}(x) =\n" + " \\begin{cases}\n" + " +1 & \\Gamma(x) > 0 \\\\\n" + " -1 & \\Gamma(x) < 0\n" + " \\end{cases}\n" + "\n" + "where :math:`\\Gamma` is the gamma function; see `gamma`. This\n" + "definition is complete since the gamma function is never zero;\n" + "see the discussion after [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Sign of the gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gamma : the gamma function\n" + "gammaln : log of the absolute value of the gamma function\n" + "loggamma : analytic continuation of the log of the gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The gamma function can be computed as ``gammasgn(x) *\n" + "np.exp(gammaln(x))``.\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5.2#E1\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is 1 for `x > 0`.\n" + "\n" + ">>> sc.gammasgn([1, 2, 3, 4])\n" + "array([1., 1., 1., 1.])\n" + "\n" + "It alternates between -1 and 1 on the intervals between negative\n" + "integers.\n" + "\n" + ">>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])\n" + "array([-1., 1., -1., 1.])\n" + "\n" + "It can be used to compute the gamma function.\n" + "\n" + ">>> x = [1.5, 0.5, -0.5, -1.5]\n" + ">>> sc.gammasgn(x) * np.exp(sc.gammaln(x))\n" + "array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])\n" + ">>> sc.gamma(x)\n" + "array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])") +ufunc_gammasgn_loops[0] = loop_d_d__As_f_f +ufunc_gammasgn_loops[1] = loop_d_d__As_d_d +ufunc_gammasgn_types[0] = NPY_FLOAT +ufunc_gammasgn_types[1] = NPY_FLOAT +ufunc_gammasgn_types[2] = NPY_DOUBLE +ufunc_gammasgn_types[3] = NPY_DOUBLE +ufunc_gammasgn_ptr[2*0] = _func_cephes_gammasgn +ufunc_gammasgn_ptr[2*0+1] = ("gammasgn") +ufunc_gammasgn_ptr[2*1] = _func_cephes_gammasgn +ufunc_gammasgn_ptr[2*1+1] = ("gammasgn") +ufunc_gammasgn_data[0] = &ufunc_gammasgn_ptr[2*0] +ufunc_gammasgn_data[1] = &ufunc_gammasgn_ptr[2*1] +gammasgn = np.PyUFunc_FromFuncAndData(ufunc_gammasgn_loops, ufunc_gammasgn_data, ufunc_gammasgn_types, 2, 1, 1, 0, "gammasgn", ufunc_gammasgn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtr_loops[2] +cdef void *ufunc_gdtr_ptr[4] +cdef void *ufunc_gdtr_data[2] +cdef char ufunc_gdtr_types[8] +cdef char *ufunc_gdtr_doc = ( + "gdtr(a, b, x, out=None)\n" + "\n" + "Gamma distribution cumulative distribution function.\n" + "\n" + "Returns the integral from zero to `x` of the gamma probability density\n" + "function,\n" + "\n" + ".. math::\n" + "\n" + " F = \\int_0^x \\frac{a^b}{\\Gamma(b)} t^{b-1} e^{-at}\\,dt,\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " The rate parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\beta` (float). It is also the reciprocal of the scale\n" + " parameter :math:`\\theta`.\n" + "b : array_like\n" + " The shape parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\alpha` (float).\n" + "x : array_like\n" + " The quantile (upper limit of integration; float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The CDF of the gamma distribution with parameters `a` and `b`\n" + " evaluated at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "gdtrc : 1 - CDF of the gamma distribution.\n" + "scipy.stats.gamma: Gamma distribution\n" + "\n" + "Notes\n" + "-----\n" + "The evaluation is carried out using the relation to the incomplete gamma\n" + "integral (regularized gamma function).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can\n" + "improve performance compared to the ``cdf`` method of `scipy.stats.gamma`\n" + "(see last example below).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``a=1``, ``b=2`` at ``x=5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import gdtr\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> gdtr(1., 2., 5.)\n" + "0.9595723180054873\n" + "\n" + "Compute the function for ``a=1`` and ``b=2`` at several points by\n" + "providing a NumPy array for `x`.\n" + "\n" + ">>> xvalues = np.array([1., 2., 3., 4])\n" + ">>> gdtr(1., 1., xvalues)\n" + "array([0.63212056, 0.86466472, 0.95021293, 0.98168436])\n" + "\n" + "`gdtr` can evaluate different parameter sets by providing arrays with\n" + "broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the\n" + "function for three different `a` at four positions `x` and ``b=3``,\n" + "resulting in a 3x4 array.\n" + "\n" + ">>> a = np.array([[0.5], [1.5], [2.5]])\n" + ">>> x = np.array([1., 2., 3., 4])\n" + ">>> a.shape, x.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> gdtr(a, 3., x)\n" + "array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358],\n" + " [0.19115317, 0.57680992, 0.82642193, 0.9380312 ],\n" + " [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> a_parameters = [0.3, 1, 2, 6]\n" + ">>> b_parameters = [2, 10, 15, 20]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... a, b, style = parameter_set\n" + "... gdtr_vals = gdtr(a, b, x)\n" + "... ax.plot(x, gdtr_vals, label=fr\"$a= {a},\\, b={b}$\", ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"Gamma distribution cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The gamma distribution is also available as `scipy.stats.gamma`. 
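As a cross-check of the relation mentioned in the Notes (an illustrative\n" + "sketch, not part of the upstream docstring), ``gdtr(a, b, x)`` matches the\n" + "regularized lower incomplete gamma function ``gammainc(b, a*x)``:\n" + "\n" + ">>> from scipy.special import gammainc\n" + ">>> bool(np.isclose(gdtr(1., 2., 5.), gammainc(2., 1. * 5.)))\n" + "True\n" + "\n" + "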
Using\n" + "`gdtr` directly can be much faster than calling the ``cdf`` method of\n" + "`scipy.stats.gamma`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``.\n" + "\n" + ">>> from scipy.stats import gamma\n" + ">>> a = 2.\n" + ">>> b = 3\n" + ">>> x = 1.\n" + ">>> gdtr_result = gdtr(a, b, x) # this will often be faster than below\n" + ">>> gamma_dist_result = gamma(b, scale=1/a).cdf(x)\n" + ">>> gdtr_result == gamma_dist_result # test that results are equal\n" + "True") +ufunc_gdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtr_types[0] = NPY_FLOAT +ufunc_gdtr_types[1] = NPY_FLOAT +ufunc_gdtr_types[2] = NPY_FLOAT +ufunc_gdtr_types[3] = NPY_FLOAT +ufunc_gdtr_types[4] = NPY_DOUBLE +ufunc_gdtr_types[5] = NPY_DOUBLE +ufunc_gdtr_types[6] = NPY_DOUBLE +ufunc_gdtr_types[7] = NPY_DOUBLE +ufunc_gdtr_ptr[2*0] = _func_cephes_gdtr +ufunc_gdtr_ptr[2*0+1] = ("gdtr") +ufunc_gdtr_ptr[2*1] = _func_cephes_gdtr +ufunc_gdtr_ptr[2*1+1] = ("gdtr") +ufunc_gdtr_data[0] = &ufunc_gdtr_ptr[2*0] +ufunc_gdtr_data[1] = &ufunc_gdtr_ptr[2*1] +gdtr = np.PyUFunc_FromFuncAndData(ufunc_gdtr_loops, ufunc_gdtr_data, ufunc_gdtr_types, 2, 3, 1, 0, "gdtr", ufunc_gdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtrc_loops[2] +cdef void *ufunc_gdtrc_ptr[4] +cdef void *ufunc_gdtrc_data[2] +cdef char ufunc_gdtrc_types[8] +cdef char *ufunc_gdtrc_doc = ( + "gdtrc(a, b, x, out=None)\n" + "\n" + "Gamma distribution survival function.\n" + "\n" + "Integral from `x` to infinity of the gamma probability density function,\n" + "\n" + ".. math::\n" + "\n" + " F = \\int_x^\\infty \\frac{a^b}{\\Gamma(b)} t^{b-1} e^{-at}\\,dt,\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " The rate parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\beta` (float). It is also the reciprocal of the scale\n" + " parameter :math:`\\theta`.\n" + "b : array_like\n" + " The shape parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\alpha` (float).\n" + "x : array_like\n" + " The quantile (lower limit of integration; float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The survival function of the gamma distribution with parameters `a`\n" + " and `b` evaluated at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr: Gamma distribution cumulative distribution function\n" + "scipy.stats.gamma: Gamma distribution\n" + "gdtrix\n" + "\n" + "Notes\n" + "-----\n" + "The evaluation is carried out using the relation to the incomplete gamma\n" + "integral (regularized gamma function).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can\n" + "improve performance compared to the ``sf`` method of `scipy.stats.gamma`\n" + "(see last example below).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``a=1`` and ``b=2`` at ``x=5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import gdtrc\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> gdtrc(1., 2., 5.)\n" + "0.04042768199451279\n" + "\n" + "Compute the function for ``a=1``, ``b=2`` at several points by providing\n" + "a NumPy array for `x`.\n" + "\n" + ">>> xvalues = np.array([1., 2., 3., 4])\n" + ">>> gdtrc(1., 1., xvalues)\n" + "array([0.36787944, 0.13533528, 0.04978707, 0.01831564])\n" + "\n" + "`gdtrc` can evaluate different parameter sets by providing arrays with\n" + "broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the\n" + "function for three different `a` at four positions `x` and ``b=3``,\n" + "resulting in a 3x4 array.\n" + "\n" + ">>> a = np.array([[0.5], [1.5], [2.5]])\n" + ">>> x = np.array([1., 2., 3., 4])\n" + ">>> a.shape, x.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> gdtrc(a, 3., x)\n" + "array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642],\n" + " [0.80884683, 0.42319008, 0.17357807, 0.0619688 ],\n" + " [0.54381312, 0.12465202, 0.02025672, 0.0027694 ]])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> a_parameters = [0.3, 1, 2, 6]\n" + ">>> b_parameters = [2, 10, 15, 20]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... a, b, style = parameter_set\n" + "... gdtrc_vals = gdtrc(a, b, x)\n" + "... ax.plot(x, gdtrc_vals, label=fr\"$a= {a},\\, b={b}$\", ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"Gamma distribution survival function\")\n" + ">>> plt.show()\n" + "\n" + "The gamma distribution is also available as `scipy.stats.gamma`.\n" + "Using `gdtrc` directly can be much faster than calling the ``sf`` method\n" + "of `scipy.stats.gamma`, especially for small arrays or individual\n" + "values. 
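As a quick identity check (an illustrative sketch, not part of the\n" + "upstream docstring), `gdtr` and `gdtrc` sum to one:\n" + "\n" + ">>> from scipy.special import gdtr\n" + ">>> bool(np.isclose(gdtr(1., 2., 5.) + gdtrc(1., 2., 5.), 1.0))\n" + "True\n" + "\n" + "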
To get the same results one must use the following parametrization:\n" + "``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``.\n" + "\n" + ">>> from scipy.stats import gamma\n" + ">>> a = 2\n" + ">>> b = 3\n" + ">>> x = 1.\n" + ">>> gdtrc_result = gdtrc(a, b, x) # this will often be faster than below\n" + ">>> gamma_dist_result = gamma(b, scale=1/a).sf(x)\n" + ">>> gdtrc_result == gamma_dist_result # test that results are equal\n" + "True") +ufunc_gdtrc_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtrc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtrc_types[0] = NPY_FLOAT +ufunc_gdtrc_types[1] = NPY_FLOAT +ufunc_gdtrc_types[2] = NPY_FLOAT +ufunc_gdtrc_types[3] = NPY_FLOAT +ufunc_gdtrc_types[4] = NPY_DOUBLE +ufunc_gdtrc_types[5] = NPY_DOUBLE +ufunc_gdtrc_types[6] = NPY_DOUBLE +ufunc_gdtrc_types[7] = NPY_DOUBLE +ufunc_gdtrc_ptr[2*0] = _func_cephes_gdtrc +ufunc_gdtrc_ptr[2*0+1] = ("gdtrc") +ufunc_gdtrc_ptr[2*1] = _func_cephes_gdtrc +ufunc_gdtrc_ptr[2*1+1] = ("gdtrc") +ufunc_gdtrc_data[0] = &ufunc_gdtrc_ptr[2*0] +ufunc_gdtrc_data[1] = &ufunc_gdtrc_ptr[2*1] +gdtrc = np.PyUFunc_FromFuncAndData(ufunc_gdtrc_loops, ufunc_gdtrc_data, ufunc_gdtrc_types, 2, 3, 1, 0, "gdtrc", ufunc_gdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtria_loops[2] +cdef void *ufunc_gdtria_ptr[4] +cdef void *ufunc_gdtria_data[2] +cdef char ufunc_gdtria_types[8] +cdef char *ufunc_gdtria_doc = ( + "gdtria(p, b, x, out=None)\n" + "\n" + "Inverse of `gdtr` vs a.\n" + "\n" + "Returns the inverse with respect to the parameter `a` of ``p =\n" + "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n" + "distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability values.\n" + "b : array_like\n" + " `b` parameter values of `gdtr(a, b, x)`. `b` is the \"shape\" parameter\n" + " of the gamma distribution.\n" + "x : array_like\n" + " Nonnegative real values, from the domain of the gamma distribution.\n" + "out : ndarray, optional\n" + " If a fourth argument is given, it must be a numpy.ndarray whose size\n" + " matches the broadcast result of `a`, `b` and `x`. `out` is then the\n" + " array returned by the function.\n" + "\n" + "Returns\n" + "-------\n" + "a : scalar or ndarray\n" + " Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`\n" + " is the \"scale\" parameter of the gamma distribution.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr : CDF of the gamma distribution.\n" + "gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.\n" + "gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `a` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `a`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Computation of the incomplete gamma function ratios and their\n" + " inverse. ACM Trans. Math. Softw. 
12 (1986), 377-393.\n" + "\n" + "Examples\n" + "--------\n" + "First evaluate `gdtr`.\n" + "\n" + ">>> from scipy.special import gdtr, gdtria\n" + ">>> p = gdtr(1.2, 3.4, 5.6)\n" + ">>> print(p)\n" + "0.94378087442\n" + "\n" + "Verify the inverse.\n" + "\n" + ">>> gdtria(p, 3.4, 5.6)\n" + "1.2") +ufunc_gdtria_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtria_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtria_types[0] = NPY_FLOAT +ufunc_gdtria_types[1] = NPY_FLOAT +ufunc_gdtria_types[2] = NPY_FLOAT +ufunc_gdtria_types[3] = NPY_FLOAT +ufunc_gdtria_types[4] = NPY_DOUBLE +ufunc_gdtria_types[5] = NPY_DOUBLE +ufunc_gdtria_types[6] = NPY_DOUBLE +ufunc_gdtria_types[7] = NPY_DOUBLE +ufunc_gdtria_ptr[2*0] = _func_gdtria +ufunc_gdtria_ptr[2*0+1] = ("gdtria") +ufunc_gdtria_ptr[2*1] = _func_gdtria +ufunc_gdtria_ptr[2*1+1] = ("gdtria") +ufunc_gdtria_data[0] = &ufunc_gdtria_ptr[2*0] +ufunc_gdtria_data[1] = &ufunc_gdtria_ptr[2*1] +gdtria = np.PyUFunc_FromFuncAndData(ufunc_gdtria_loops, ufunc_gdtria_data, ufunc_gdtria_types, 2, 3, 1, 0, "gdtria", ufunc_gdtria_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtrib_loops[2] +cdef void *ufunc_gdtrib_ptr[4] +cdef void *ufunc_gdtrib_data[2] +cdef char ufunc_gdtrib_types[8] +cdef char *ufunc_gdtrib_doc = ( + "gdtrib(a, p, x, out=None)\n" + "\n" + "Inverse of `gdtr` vs b.\n" + "\n" + "Returns the inverse with respect to the parameter `b` of ``p =\n" + "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n" + "distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " `a` parameter values of `gdtr(a, b, x)`. `1/a` is the \"scale\"\n" + " parameter of the gamma distribution.\n" + "p : array_like\n" + " Probability values.\n" + "x : array_like\n" + " Nonnegative real values, from the domain of the gamma distribution.\n" + "out : ndarray, optional\n" + " If a fourth argument is given, it must be a numpy.ndarray whose size\n" + " matches the broadcast result of `a`, `b` and `x`. `out` is then the\n" + " array returned by the function.\n" + "\n" + "Returns\n" + "-------\n" + "b : scalar or ndarray\n" + " Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is\n" + " the \"shape\" parameter of the gamma distribution.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr : CDF of the gamma distribution.\n" + "gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.\n" + "gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `b` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `b`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Computation of the incomplete gamma function ratios and their\n" + " inverse. ACM Trans. Math. Softw. 
12 (1986), 377-393.\n" + "\n" + "Examples\n" + "--------\n" + "First evaluate `gdtr`.\n" + "\n" + ">>> from scipy.special import gdtr, gdtrib\n" + ">>> p = gdtr(1.2, 3.4, 5.6)\n" + ">>> print(p)\n" + "0.94378087442\n" + "\n" + "Verify the inverse.\n" + "\n" + ">>> gdtrib(1.2, p, 5.6)\n" + "3.3999999999723882") +ufunc_gdtrib_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtrib_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtrib_types[0] = NPY_FLOAT +ufunc_gdtrib_types[1] = NPY_FLOAT +ufunc_gdtrib_types[2] = NPY_FLOAT +ufunc_gdtrib_types[3] = NPY_FLOAT +ufunc_gdtrib_types[4] = NPY_DOUBLE +ufunc_gdtrib_types[5] = NPY_DOUBLE +ufunc_gdtrib_types[6] = NPY_DOUBLE +ufunc_gdtrib_types[7] = NPY_DOUBLE +ufunc_gdtrib_ptr[2*0] = _func_gdtrib +ufunc_gdtrib_ptr[2*0+1] = ("gdtrib") +ufunc_gdtrib_ptr[2*1] = _func_gdtrib +ufunc_gdtrib_ptr[2*1+1] = ("gdtrib") +ufunc_gdtrib_data[0] = &ufunc_gdtrib_ptr[2*0] +ufunc_gdtrib_data[1] = &ufunc_gdtrib_ptr[2*1] +gdtrib = np.PyUFunc_FromFuncAndData(ufunc_gdtrib_loops, ufunc_gdtrib_data, ufunc_gdtrib_types, 2, 3, 1, 0, "gdtrib", ufunc_gdtrib_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtrix_loops[2] +cdef void *ufunc_gdtrix_ptr[4] +cdef void *ufunc_gdtrix_data[2] +cdef char ufunc_gdtrix_types[8] +cdef char *ufunc_gdtrix_doc = ( + "gdtrix(a, b, p, out=None)\n" + "\n" + "Inverse of `gdtr` vs x.\n" + "\n" + "Returns the inverse with respect to the parameter `x` of ``p =\n" + "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n" + "distribution. This is also known as the pth quantile of the\n" + "distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " `a` parameter values of `gdtr(a, b, x)`. `1/a` is the \"scale\"\n" + " parameter of the gamma distribution.\n" + "b : array_like\n" + " `b` parameter values of `gdtr(a, b, x)`. `b` is the \"shape\" parameter\n" + " of the gamma distribution.\n" + "p : array_like\n" + " Probability values.\n" + "out : ndarray, optional\n" + " If a fourth argument is given, it must be a numpy.ndarray whose size\n" + " matches the broadcast result of `a`, `b` and `x`. `out` is then the\n" + " array returned by the function.\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Values of the `x` parameter such that `p = gdtr(a, b, x)`.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr : CDF of the gamma distribution.\n" + "gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.\n" + "gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `x` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `x`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Computation of the incomplete gamma function ratios and their\n" + " inverse. ACM Trans. Math. Softw. 
12 (1986), 377-393.\n" + "\n" + "Examples\n" + "--------\n" + "First evaluate `gdtr`.\n" + "\n" + ">>> from scipy.special import gdtr, gdtrix\n" + ">>> p = gdtr(1.2, 3.4, 5.6)\n" + ">>> print(p)\n" + "0.94378087442\n" + "\n" + "Verify the inverse.\n" + "\n" + ">>> gdtrix(1.2, 3.4, p)\n" + "5.5999999999999996") +ufunc_gdtrix_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtrix_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtrix_types[0] = NPY_FLOAT +ufunc_gdtrix_types[1] = NPY_FLOAT +ufunc_gdtrix_types[2] = NPY_FLOAT +ufunc_gdtrix_types[3] = NPY_FLOAT +ufunc_gdtrix_types[4] = NPY_DOUBLE +ufunc_gdtrix_types[5] = NPY_DOUBLE +ufunc_gdtrix_types[6] = NPY_DOUBLE +ufunc_gdtrix_types[7] = NPY_DOUBLE +ufunc_gdtrix_ptr[2*0] = _func_gdtrix +ufunc_gdtrix_ptr[2*0+1] = ("gdtrix") +ufunc_gdtrix_ptr[2*1] = _func_gdtrix +ufunc_gdtrix_ptr[2*1+1] = ("gdtrix") +ufunc_gdtrix_data[0] = &ufunc_gdtrix_ptr[2*0] +ufunc_gdtrix_data[1] = &ufunc_gdtrix_ptr[2*1] +gdtrix = np.PyUFunc_FromFuncAndData(ufunc_gdtrix_loops, ufunc_gdtrix_data, ufunc_gdtrix_types, 2, 3, 1, 0, "gdtrix", ufunc_gdtrix_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_huber_loops[2] +cdef void *ufunc_huber_ptr[4] +cdef void *ufunc_huber_data[2] +cdef char ufunc_huber_types[6] +cdef char *ufunc_huber_doc = ( + "huber(delta, r, out=None)\n" + "\n" + "Huber loss function.\n" + "\n" + ".. math:: \\text{huber}(\\delta, r) = \\begin{cases} \\infty & \\delta < 0 \\\\\n" + " \\frac{1}{2}r^2 & 0 \\le \\delta, | r | \\le \\delta \\\\\n" + " \\delta ( |r| - \\frac{1}{2}\\delta ) & \\text{otherwise} \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "delta : ndarray\n" + " Input array, indicating the quadratic vs. linear loss changepoint.\n" + "r : ndarray\n" + " Input array, possibly representing residuals.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The computed Huber loss function values.\n" + "\n" + "See Also\n" + "--------\n" + "pseudo_huber : smooth approximation of this function\n" + "\n" + "Notes\n" + "-----\n" + "`huber` is useful as a loss function in robust statistics or machine\n" + "learning to reduce the influence of outliers as compared to the common\n" + "squared error loss: residuals with a magnitude higher than `delta` are\n" + "not squared [1]_.\n" + "\n" + "Typically, `r` represents residuals, the difference\n" + "between a model prediction and data. Then, for :math:`|r|\\leq\\delta`,\n" + "`huber` resembles the squared error and for :math:`|r|>\\delta` the\n" + "absolute error. This way, the Huber loss often achieves\n" + "a fast convergence in model fitting for small residuals like the squared\n" + "error loss function and still reduces the influence of outliers\n" + "(:math:`|r|>\\delta`) like the absolute error loss. As :math:`\\delta` is\n" + "the cutoff between squared and absolute error regimes, it has\n" + "to be tuned carefully for each problem. `huber` is also\n" + "convex, making it suitable for gradient-based optimization.\n" + "\n" + ".. versionadded:: 0.15.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Peter Huber. \"Robust Estimation of a Location Parameter\",\n" + " 1964. Annals of Mathematical Statistics. 
35 (1): 73 - 101.\n" + "\n" + "Examples\n" + "--------\n" + "Import all necessary modules.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import huber\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Compute the function for ``delta=1`` at ``r=2``.\n" + "\n" + ">>> huber(1., 2.)\n" + "1.5\n" + "\n" + "Compute the function for different `delta` by providing a NumPy array or\n" + "list for `delta`.\n" + "\n" + ">>> huber([1., 3., 5.], 4.)\n" + "array([3.5, 7.5, 8. ])\n" + "\n" + "Compute the function at different points by providing a NumPy array or\n" + "list for `r`.\n" + "\n" + ">>> huber(2., np.array([1., 1.5, 3.]))\n" + "array([0.5 , 1.125, 4. ])\n" + "\n" + "The function can be calculated for different `delta` and `r` by\n" + "providing arrays for both with compatible shapes for broadcasting.\n" + "\n" + ">>> r = np.array([1., 2.5, 8., 10.])\n" + ">>> deltas = np.array([[1.], [5.], [9.]])\n" + ">>> print(r.shape, deltas.shape)\n" + "(4,) (3, 1)\n" + "\n" + ">>> huber(deltas, r)\n" + "array([[ 0.5 , 2. , 7.5 , 9.5 ],\n" + " [ 0.5 , 3.125, 27.5 , 37.5 ],\n" + " [ 0.5 , 3.125, 32. , 49.5 ]])\n" + "\n" + "Plot the function for different `delta`.\n" + "\n" + ">>> x = np.linspace(-4, 4, 500)\n" + ">>> deltas = [1, 2, 3]\n" + ">>> linestyles = [\"dashed\", \"dotted\", \"dashdot\"]\n" + ">>> fig, ax = plt.subplots()\n" + ">>> combined_plot_parameters = list(zip(deltas, linestyles))\n" + ">>> for delta, style in combined_plot_parameters:\n" + "... ax.plot(x, huber(delta, x), label=fr\"$\\delta={delta}$\", ls=style)\n" + ">>> ax.legend(loc=\"upper center\")\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(r\"Huber loss function $h_{\\delta}(x)$\")\n" + ">>> ax.set_xlim(-4, 4)\n" + ">>> ax.set_ylim(0, 8)\n" + ">>> plt.show()") +ufunc_huber_loops[0] = loop_d_dd__As_ff_f +ufunc_huber_loops[1] = loop_d_dd__As_dd_d +ufunc_huber_types[0] = NPY_FLOAT +ufunc_huber_types[1] = NPY_FLOAT +ufunc_huber_types[2] = NPY_FLOAT +ufunc_huber_types[3] = NPY_DOUBLE +ufunc_huber_types[4] = NPY_DOUBLE +ufunc_huber_types[5] = NPY_DOUBLE +ufunc_huber_ptr[2*0] = _func_huber +ufunc_huber_ptr[2*0+1] = ("huber") +ufunc_huber_ptr[2*1] = _func_huber +ufunc_huber_ptr[2*1+1] = ("huber") +ufunc_huber_data[0] = &ufunc_huber_ptr[2*0] +ufunc_huber_data[1] = &ufunc_huber_ptr[2*1] +huber = np.PyUFunc_FromFuncAndData(ufunc_huber_loops, ufunc_huber_data, ufunc_huber_types, 2, 2, 1, 0, "huber", ufunc_huber_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyp0f1_loops[4] +cdef void *ufunc_hyp0f1_ptr[8] +cdef void *ufunc_hyp0f1_data[4] +cdef char ufunc_hyp0f1_types[12] +cdef char *ufunc_hyp0f1_doc = ( + "hyp0f1(v, z, out=None)\n" + "\n" + "Confluent hypergeometric limit function 0F1.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Real-valued parameter\n" + "z : array_like\n" + " Real- or complex-valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The confluent hypergeometric limit function\n" + "\n" + "Notes\n" + "-----\n" + "This function is defined as:\n" + "\n" + ".. math:: _0F_1(v, z) = \\sum_{k=0}^{\\infty}\\frac{z^k}{(v)_k k!}.\n" + "\n" + "It's also the limit as :math:`q \\to \\infty` of :math:`_1F_1(q; v; z/q)`,\n" + "and satisfies the differential equation :math:`zf''(z) + vf'(z) =\n" + "f(z)`. See [1]_ for more information.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Wolfram MathWorld, \"Confluent Hypergeometric Limit Function\",\n" + " http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is one when `z` is zero.\n" + "\n" + ">>> sc.hyp0f1(1, 0)\n" + "1.0\n" + "\n" + "It is the limit of the confluent hypergeometric function as `q`\n" + "goes to infinity.\n" + "\n" + ">>> q = np.array([1, 10, 100, 1000])\n" + ">>> v = 1\n" + ">>> z = 1\n" + ">>> sc.hyp1f1(q, v, z / q)\n" + "array([2.71828183, 2.31481985, 2.28303778, 2.27992985])\n" + ">>> sc.hyp0f1(v, z)\n" + "2.2795853023360673\n" + "\n" + "It is related to Bessel functions.\n" + "\n" + ">>> n = 1\n" + ">>> x = np.linspace(0, 1, 5)\n" + ">>> sc.jv(n, x)\n" + "array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])\n" + ">>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)\n" + "array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])") +ufunc_hyp0f1_loops[0] = loop_d_dd__As_ff_f +ufunc_hyp0f1_loops[1] = loop_D_dD__As_fF_F +ufunc_hyp0f1_loops[2] = loop_d_dd__As_dd_d +ufunc_hyp0f1_loops[3] = loop_D_dD__As_dD_D +ufunc_hyp0f1_types[0] = NPY_FLOAT +ufunc_hyp0f1_types[1] = NPY_FLOAT +ufunc_hyp0f1_types[2] = NPY_FLOAT +ufunc_hyp0f1_types[3] = NPY_FLOAT +ufunc_hyp0f1_types[4] = NPY_CFLOAT +ufunc_hyp0f1_types[5] = NPY_CFLOAT +ufunc_hyp0f1_types[6] = NPY_DOUBLE +ufunc_hyp0f1_types[7] = NPY_DOUBLE +ufunc_hyp0f1_types[8] = NPY_DOUBLE +ufunc_hyp0f1_types[9] = NPY_DOUBLE +ufunc_hyp0f1_types[10] = NPY_CDOUBLE +ufunc_hyp0f1_types[11] = NPY_CDOUBLE +ufunc_hyp0f1_ptr[2*0] = _func__hyp0f1_real +ufunc_hyp0f1_ptr[2*0+1] = ("hyp0f1") +ufunc_hyp0f1_ptr[2*1] = _func__hyp0f1_cmplx +ufunc_hyp0f1_ptr[2*1+1] = ("hyp0f1") +ufunc_hyp0f1_ptr[2*2] = _func__hyp0f1_real +ufunc_hyp0f1_ptr[2*2+1] = ("hyp0f1") +ufunc_hyp0f1_ptr[2*3] = _func__hyp0f1_cmplx +ufunc_hyp0f1_ptr[2*3+1] = ("hyp0f1") +ufunc_hyp0f1_data[0] = &ufunc_hyp0f1_ptr[2*0] +ufunc_hyp0f1_data[1] = &ufunc_hyp0f1_ptr[2*1] +ufunc_hyp0f1_data[2] = &ufunc_hyp0f1_ptr[2*2] +ufunc_hyp0f1_data[3] = &ufunc_hyp0f1_ptr[2*3] +hyp0f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp0f1_loops, ufunc_hyp0f1_data, ufunc_hyp0f1_types, 4, 2, 1, 0, "hyp0f1", ufunc_hyp0f1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyp1f1_loops[4] +cdef void *ufunc_hyp1f1_ptr[8] +cdef void *ufunc_hyp1f1_data[4] +cdef char ufunc_hyp1f1_types[16] +cdef char *ufunc_hyp1f1_doc = ( + "hyp1f1(a, b, x, out=None)\n" + "\n" + "Confluent hypergeometric function 1F1.\n" + "\n" + "The confluent hypergeometric function is defined by the series\n" + "\n" + ".. math::\n" + "\n" + " {}_1F_1(a; b; x) = \\sum_{k = 0}^\\infty \\frac{(a)_k}{(b)_k k!} x^k.\n" + "\n" + "See [dlmf]_ for more details. Here :math:`(\\cdot)_k` is the\n" + "Pochhammer symbol; see `poch`.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real parameters\n" + "x : array_like\n" + " Real or complex argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the confluent hypergeometric function\n" + "\n" + "See Also\n" + "--------\n" + "hyperu : another confluent hypergeometric function\n" + "hyp0f1 : confluent hypergeometric limit function\n" + "hyp2f1 : Gaussian hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/13.2#E2\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is one when `x` is zero:\n" + "\n" + ">>> sc.hyp1f1(0.5, 0.5, 0)\n" + "1.0\n" + "\n" + "It is singular when `b` is a nonpositive integer.\n" + "\n" + ">>> sc.hyp1f1(0.5, -1, 0)\n" + "inf\n" + "\n" + "It is a polynomial when `a` is a nonpositive integer.\n" + "\n" + ">>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])\n" + ">>> sc.hyp1f1(a, b, x)\n" + "array([-1., -3., -5., -7.])\n" + ">>> 1 + (a / b) * x\n" + "array([-1., -3., -5., -7.])\n" + "\n" + "It reduces to the exponential function when `a = b`.\n" + "\n" + ">>> sc.hyp1f1(2, 2, [1, 2, 3, 4])\n" + "array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])\n" + ">>> np.exp([1, 2, 3, 4])\n" + "array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])") +ufunc_hyp1f1_loops[0] = loop_d_ddd__As_fff_f +ufunc_hyp1f1_loops[1] = loop_D_ddD__As_ffF_F +ufunc_hyp1f1_loops[2] = loop_d_ddd__As_ddd_d +ufunc_hyp1f1_loops[3] = loop_D_ddD__As_ddD_D +ufunc_hyp1f1_types[0] = NPY_FLOAT +ufunc_hyp1f1_types[1] = NPY_FLOAT +ufunc_hyp1f1_types[2] = NPY_FLOAT +ufunc_hyp1f1_types[3] = NPY_FLOAT +ufunc_hyp1f1_types[4] = NPY_FLOAT +ufunc_hyp1f1_types[5] = NPY_FLOAT +ufunc_hyp1f1_types[6] = NPY_CFLOAT +ufunc_hyp1f1_types[7] = NPY_CFLOAT +ufunc_hyp1f1_types[8] = NPY_DOUBLE +ufunc_hyp1f1_types[9] = NPY_DOUBLE +ufunc_hyp1f1_types[10] = NPY_DOUBLE +ufunc_hyp1f1_types[11] = NPY_DOUBLE +ufunc_hyp1f1_types[12] = NPY_DOUBLE +ufunc_hyp1f1_types[13] = NPY_DOUBLE +ufunc_hyp1f1_types[14] = NPY_CDOUBLE +ufunc_hyp1f1_types[15] = NPY_CDOUBLE +ufunc_hyp1f1_ptr[2*0] = scipy.special._ufuncs_cxx._export_hyp1f1_double +ufunc_hyp1f1_ptr[2*0+1] = ("hyp1f1") +ufunc_hyp1f1_ptr[2*1] = _func_chyp1f1_wrap +ufunc_hyp1f1_ptr[2*1+1] = ("hyp1f1") +ufunc_hyp1f1_ptr[2*2] = scipy.special._ufuncs_cxx._export_hyp1f1_double +ufunc_hyp1f1_ptr[2*2+1] = ("hyp1f1") +ufunc_hyp1f1_ptr[2*3] = _func_chyp1f1_wrap +ufunc_hyp1f1_ptr[2*3+1] = ("hyp1f1") +ufunc_hyp1f1_data[0] = &ufunc_hyp1f1_ptr[2*0] +ufunc_hyp1f1_data[1] = &ufunc_hyp1f1_ptr[2*1] +ufunc_hyp1f1_data[2] = &ufunc_hyp1f1_ptr[2*2] +ufunc_hyp1f1_data[3] = &ufunc_hyp1f1_ptr[2*3] +hyp1f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp1f1_loops, ufunc_hyp1f1_data, ufunc_hyp1f1_types, 4, 3, 1, 0, "hyp1f1", ufunc_hyp1f1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyperu_loops[2] +cdef void *ufunc_hyperu_ptr[4] +cdef void *ufunc_hyperu_data[2] +cdef char ufunc_hyperu_types[8] +cdef char *ufunc_hyperu_doc = ( + "hyperu(a, b, x, out=None)\n" + "\n" + "Confluent hypergeometric function U\n" + "\n" + "It is defined as the solution to the equation\n" + "\n" + ".. math::\n" + "\n" + " x \\frac{d^2w}{dx^2} + (b - x) \\frac{dw}{dx} - aw = 0\n" + "\n" + "which satisfies the property\n" + "\n" + ".. math::\n" + "\n" + " U(a, b, x) \\sim x^{-a}\n" + "\n" + "as :math:`x \\to \\infty`. See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real-valued parameters\n" + "x : array_like\n" + " Real-valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of `U`\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/13.2#E6\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It has a branch cut along the negative `x` axis.\n" + "\n" + ">>> x = np.linspace(-0.1, -10, 5)\n" + ">>> sc.hyperu(1, 1, x)\n" + "array([nan, nan, nan, nan, nan])\n" + "\n" + "It approaches zero as `x` goes to infinity.\n" + "\n" + ">>> x = np.array([1, 10, 100])\n" + ">>> sc.hyperu(1, 1, x)\n" + "array([0.59634736, 0.09156333, 0.00990194])\n" + "\n" + "It satisfies Kummer's transformation.\n" + "\n" + ">>> a, b, x = 2, 1, 1\n" + ">>> sc.hyperu(a, b, x)\n" + "0.1926947246463881\n" + ">>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)\n" + "0.1926947246463881") +ufunc_hyperu_loops[0] = loop_d_ddd__As_fff_f +ufunc_hyperu_loops[1] = loop_d_ddd__As_ddd_d +ufunc_hyperu_types[0] = NPY_FLOAT +ufunc_hyperu_types[1] = NPY_FLOAT +ufunc_hyperu_types[2] = NPY_FLOAT +ufunc_hyperu_types[3] = NPY_FLOAT +ufunc_hyperu_types[4] = NPY_DOUBLE +ufunc_hyperu_types[5] = NPY_DOUBLE +ufunc_hyperu_types[6] = NPY_DOUBLE +ufunc_hyperu_types[7] = NPY_DOUBLE +ufunc_hyperu_ptr[2*0] = _func_hyperu +ufunc_hyperu_ptr[2*0+1] = ("hyperu") +ufunc_hyperu_ptr[2*1] = _func_hyperu +ufunc_hyperu_ptr[2*1+1] = ("hyperu") +ufunc_hyperu_data[0] = &ufunc_hyperu_ptr[2*0] +ufunc_hyperu_data[1] = &ufunc_hyperu_ptr[2*1] +hyperu = np.PyUFunc_FromFuncAndData(ufunc_hyperu_loops, ufunc_hyperu_data, ufunc_hyperu_types, 2, 3, 1, 0, "hyperu", ufunc_hyperu_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i0_loops[2] +cdef void *ufunc_i0_ptr[4] +cdef void *ufunc_i0_data[2] +cdef char ufunc_i0_types[4] +cdef char *ufunc_i0_doc = ( + "i0(x, out=None)\n" + "\n" + "Modified Bessel function of order 0.\n" + "\n" + "Defined as,\n" + "\n" + ".. math::\n" + " I_0(x) = \\sum_{k=0}^\\infty \\frac{(x^2/4)^k}{(k!)^2} = J_0(\\imath x),\n" + "\n" + "where :math:`J_0` is the Bessel function of the first kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the modified Bessel function of order 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of any order\n" + "i0e: Exponentially scaled modified Bessel function of order 0\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i0`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import i0\n" + ">>> i0(1.)\n" + "1.2660658777520082\n" + "\n" + "Calculate at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> i0(np.array([-2., 0., 3.5]))\n" + "array([2.2795853 , 1. 
, 7.37820343])\n" + "\n" + "Plot the function from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i0_loops[0] = loop_d_d__As_f_f +ufunc_i0_loops[1] = loop_d_d__As_d_d +ufunc_i0_types[0] = NPY_FLOAT +ufunc_i0_types[1] = NPY_FLOAT +ufunc_i0_types[2] = NPY_DOUBLE +ufunc_i0_types[3] = NPY_DOUBLE +ufunc_i0_ptr[2*0] = _func_cephes_i0 +ufunc_i0_ptr[2*0+1] = ("i0") +ufunc_i0_ptr[2*1] = _func_cephes_i0 +ufunc_i0_ptr[2*1+1] = ("i0") +ufunc_i0_data[0] = &ufunc_i0_ptr[2*0] +ufunc_i0_data[1] = &ufunc_i0_ptr[2*1] +i0 = np.PyUFunc_FromFuncAndData(ufunc_i0_loops, ufunc_i0_data, ufunc_i0_types, 2, 1, 1, 0, "i0", ufunc_i0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i0e_loops[2] +cdef void *ufunc_i0e_ptr[4] +cdef void *ufunc_i0e_data[2] +cdef char ufunc_i0e_types[4] +cdef char *ufunc_i0e_doc = ( + "i0e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function of order 0.\n" + "\n" + "Defined as::\n" + "\n" + " i0e(x) = exp(-abs(x)) * i0(x).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function of order 0\n" + " at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i0: Modified Bessel function of order 0\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval. The\n" + "polynomial expansions used are the same as those in `i0`, but\n" + "they are not multiplied by the dominant exponential factor.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i0e`. `i0e`\n" + "is useful for large arguments `x`: for these, `i0` quickly overflows.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `i0` returns infinity whereas `i0e` still returns\n" + "a finite number.\n" + "\n" + ">>> from scipy.special import i0, i0e\n" + ">>> i0(1000.), i0e(1000.)\n" + "(inf, 0.012617240455891257)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> i0e(np.array([-2., 0., 3.]))\n" + "array([0.30850832, 1. 
, 0.24300035])\n" + "\n" + "Plot the function from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i0e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i0e_loops[0] = loop_d_d__As_f_f +ufunc_i0e_loops[1] = loop_d_d__As_d_d +ufunc_i0e_types[0] = NPY_FLOAT +ufunc_i0e_types[1] = NPY_FLOAT +ufunc_i0e_types[2] = NPY_DOUBLE +ufunc_i0e_types[3] = NPY_DOUBLE +ufunc_i0e_ptr[2*0] = _func_cephes_i0e +ufunc_i0e_ptr[2*0+1] = ("i0e") +ufunc_i0e_ptr[2*1] = _func_cephes_i0e +ufunc_i0e_ptr[2*1+1] = ("i0e") +ufunc_i0e_data[0] = &ufunc_i0e_ptr[2*0] +ufunc_i0e_data[1] = &ufunc_i0e_ptr[2*1] +i0e = np.PyUFunc_FromFuncAndData(ufunc_i0e_loops, ufunc_i0e_data, ufunc_i0e_types, 2, 1, 1, 0, "i0e", ufunc_i0e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i1_loops[2] +cdef void *ufunc_i1_ptr[4] +cdef void *ufunc_i1_data[2] +cdef char ufunc_i1_types[4] +cdef char *ufunc_i1_doc = ( + "i1(x, out=None)\n" + "\n" + "Modified Bessel function of order 1.\n" + "\n" + "Defined as,\n" + "\n" + ".. math::\n" + " I_1(x) = \\frac{1}{2}x \\sum_{k=0}^\\infty \\frac{(x^2/4)^k}{k! (k + 1)!}\n" + " = -\\imath J_1(\\imath x),\n" + "\n" + "where :math:`J_1` is the Bessel function of the first kind of order 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the modified Bessel function of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i1e: Exponentially scaled modified Bessel function of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i1`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import i1\n" + ">>> i1(1.)\n" + "0.5651591039924851\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> i1(np.array([-2., 0., 6.]))\n" + "array([-1.59063685, 0. 
, 61.34193678])\n" + "\n" + "Plot the function between -10 and 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i1_loops[0] = loop_d_d__As_f_f +ufunc_i1_loops[1] = loop_d_d__As_d_d +ufunc_i1_types[0] = NPY_FLOAT +ufunc_i1_types[1] = NPY_FLOAT +ufunc_i1_types[2] = NPY_DOUBLE +ufunc_i1_types[3] = NPY_DOUBLE +ufunc_i1_ptr[2*0] = _func_cephes_i1 +ufunc_i1_ptr[2*0+1] = ("i1") +ufunc_i1_ptr[2*1] = _func_cephes_i1 +ufunc_i1_ptr[2*1+1] = ("i1") +ufunc_i1_data[0] = &ufunc_i1_ptr[2*0] +ufunc_i1_data[1] = &ufunc_i1_ptr[2*1] +i1 = np.PyUFunc_FromFuncAndData(ufunc_i1_loops, ufunc_i1_data, ufunc_i1_types, 2, 1, 1, 0, "i1", ufunc_i1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i1e_loops[2] +cdef void *ufunc_i1e_ptr[4] +cdef void *ufunc_i1e_data[2] +cdef char ufunc_i1e_types[4] +cdef char *ufunc_i1e_doc = ( + "i1e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function of order 1.\n" + "\n" + "Defined as::\n" + "\n" + " i1e(x) = exp(-abs(x)) * i1(x)\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function of order 1\n" + " at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i1: Modified Bessel function of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval. The\n" + "polynomial expansions used are the same as those in `i1`, but\n" + "they are not multiplied by the dominant exponential factor.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i1e`. `i1e`\n" + "is useful for large arguments `x`: for these, `i1` quickly overflows.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `i1` returns infinity whereas `i1e` still returns\n" + "a finite number.\n" + "\n" + ">>> from scipy.special import i1, i1e\n" + ">>> i1(1000.), i1e(1000.)\n" + "(inf, 0.01261093025692863)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> i1e(np.array([-2., 0., 6.]))\n" + "array([-0.21526929, 0. 
, 0.15205146])\n" + "\n" + "Plot the function between -10 and 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i1e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i1e_loops[0] = loop_d_d__As_f_f +ufunc_i1e_loops[1] = loop_d_d__As_d_d +ufunc_i1e_types[0] = NPY_FLOAT +ufunc_i1e_types[1] = NPY_FLOAT +ufunc_i1e_types[2] = NPY_DOUBLE +ufunc_i1e_types[3] = NPY_DOUBLE +ufunc_i1e_ptr[2*0] = _func_cephes_i1e +ufunc_i1e_ptr[2*0+1] = ("i1e") +ufunc_i1e_ptr[2*1] = _func_cephes_i1e +ufunc_i1e_ptr[2*1+1] = ("i1e") +ufunc_i1e_data[0] = &ufunc_i1e_ptr[2*0] +ufunc_i1e_data[1] = &ufunc_i1e_ptr[2*1] +i1e = np.PyUFunc_FromFuncAndData(ufunc_i1e_loops, ufunc_i1e_data, ufunc_i1e_types, 2, 1, 1, 0, "i1e", ufunc_i1e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_inv_boxcox_loops[2] +cdef void *ufunc_inv_boxcox_ptr[4] +cdef void *ufunc_inv_boxcox_data[2] +cdef char ufunc_inv_boxcox_types[6] +cdef char *ufunc_inv_boxcox_doc = ( + "inv_boxcox(y, lmbda, out=None)\n" + "\n" + "Compute the inverse of the Box-Cox transformation.\n" + "\n" + "Find ``x`` such that::\n" + "\n" + " y = (x**lmbda - 1) / lmbda if lmbda != 0\n" + " log(x) if lmbda == 0\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.16.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox, inv_boxcox\n" + ">>> y = boxcox([1, 4, 10], 2.5)\n" + ">>> inv_boxcox(y, 2.5)\n" + "array([1., 4., 10.])") +ufunc_inv_boxcox_loops[0] = loop_d_dd__As_ff_f +ufunc_inv_boxcox_loops[1] = loop_d_dd__As_dd_d +ufunc_inv_boxcox_types[0] = NPY_FLOAT +ufunc_inv_boxcox_types[1] = NPY_FLOAT +ufunc_inv_boxcox_types[2] = NPY_FLOAT +ufunc_inv_boxcox_types[3] = NPY_DOUBLE +ufunc_inv_boxcox_types[4] = NPY_DOUBLE +ufunc_inv_boxcox_types[5] = NPY_DOUBLE +ufunc_inv_boxcox_ptr[2*0] = _func_inv_boxcox +ufunc_inv_boxcox_ptr[2*0+1] = ("inv_boxcox") +ufunc_inv_boxcox_ptr[2*1] = _func_inv_boxcox +ufunc_inv_boxcox_ptr[2*1+1] = ("inv_boxcox") +ufunc_inv_boxcox_data[0] = &ufunc_inv_boxcox_ptr[2*0] +ufunc_inv_boxcox_data[1] = &ufunc_inv_boxcox_ptr[2*1] +inv_boxcox = np.PyUFunc_FromFuncAndData(ufunc_inv_boxcox_loops, ufunc_inv_boxcox_data, ufunc_inv_boxcox_types, 2, 2, 1, 0, "inv_boxcox", ufunc_inv_boxcox_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_inv_boxcox1p_loops[2] +cdef void *ufunc_inv_boxcox1p_ptr[4] +cdef void *ufunc_inv_boxcox1p_data[2] +cdef char ufunc_inv_boxcox1p_types[6] +cdef char *ufunc_inv_boxcox1p_doc = ( + "inv_boxcox1p(y, lmbda, out=None)\n" + "\n" + "Compute the inverse of the Box-Cox transformation.\n" + "\n" + "Find ``x`` such that::\n" + "\n" + " y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0\n" + " log(1+x) if lmbda == 0\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. 
versionadded:: 0.16.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox1p, inv_boxcox1p\n" + ">>> y = boxcox1p([1, 4, 10], 2.5)\n" + ">>> inv_boxcox1p(y, 2.5)\n" + "array([1., 4., 10.])") +ufunc_inv_boxcox1p_loops[0] = loop_d_dd__As_ff_f +ufunc_inv_boxcox1p_loops[1] = loop_d_dd__As_dd_d +ufunc_inv_boxcox1p_types[0] = NPY_FLOAT +ufunc_inv_boxcox1p_types[1] = NPY_FLOAT +ufunc_inv_boxcox1p_types[2] = NPY_FLOAT +ufunc_inv_boxcox1p_types[3] = NPY_DOUBLE +ufunc_inv_boxcox1p_types[4] = NPY_DOUBLE +ufunc_inv_boxcox1p_types[5] = NPY_DOUBLE +ufunc_inv_boxcox1p_ptr[2*0] = _func_inv_boxcox1p +ufunc_inv_boxcox1p_ptr[2*0+1] = ("inv_boxcox1p") +ufunc_inv_boxcox1p_ptr[2*1] = _func_inv_boxcox1p +ufunc_inv_boxcox1p_ptr[2*1+1] = ("inv_boxcox1p") +ufunc_inv_boxcox1p_data[0] = &ufunc_inv_boxcox1p_ptr[2*0] +ufunc_inv_boxcox1p_data[1] = &ufunc_inv_boxcox1p_ptr[2*1] +inv_boxcox1p = np.PyUFunc_FromFuncAndData(ufunc_inv_boxcox1p_loops, ufunc_inv_boxcox1p_data, ufunc_inv_boxcox1p_types, 2, 2, 1, 0, "inv_boxcox1p", ufunc_inv_boxcox1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_j0_loops[2] +cdef void *ufunc_j0_ptr[4] +cdef void *ufunc_j0_data[2] +cdef char ufunc_j0_types[4] +cdef char *ufunc_j0_doc = ( + "j0(x, out=None)\n" + "\n" + "Bessel function of the first kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "J : scalar or ndarray\n" + " Value of the Bessel function of the first kind of order 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "jv : Bessel function of real order and complex argument.\n" + "spherical_jn : spherical Bessel functions.\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 5] and (5, infinity). In the\n" + "first interval the following rational approximation is used:\n" + "\n" + ".. math::\n" + "\n" + " J_0(x) \\approx (w - r_1^2)(w - r_2^2) \\frac{P_3(w)}{Q_8(w)},\n" + "\n" + "where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of\n" + ":math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3\n" + "and 8, respectively.\n" + "\n" + "In the second interval, the Hankel asymptotic expansion is employed with\n" + "two rational functions of degree 6/6 and 7/7.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `j0`.\n" + "It should not be confused with the spherical Bessel functions (see\n" + "`spherical_jn`).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import j0\n" + ">>> j0(1.)\n" + "0.7651976865579665\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> j0(np.array([-2., 0., 4.]))\n" + "array([ 0.22389078, 1. 
, -0.39714981])\n" + "\n" + "Plot the function from -20 to 20.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-20., 20., 1000)\n" + ">>> y = j0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_j0_loops[0] = loop_d_d__As_f_f +ufunc_j0_loops[1] = loop_d_d__As_d_d +ufunc_j0_types[0] = NPY_FLOAT +ufunc_j0_types[1] = NPY_FLOAT +ufunc_j0_types[2] = NPY_DOUBLE +ufunc_j0_types[3] = NPY_DOUBLE +ufunc_j0_ptr[2*0] = _func_cephes_j0 +ufunc_j0_ptr[2*0+1] = ("j0") +ufunc_j0_ptr[2*1] = _func_cephes_j0 +ufunc_j0_ptr[2*1+1] = ("j0") +ufunc_j0_data[0] = &ufunc_j0_ptr[2*0] +ufunc_j0_data[1] = &ufunc_j0_ptr[2*1] +j0 = np.PyUFunc_FromFuncAndData(ufunc_j0_loops, ufunc_j0_data, ufunc_j0_types, 2, 1, 1, 0, "j0", ufunc_j0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_j1_loops[2] +cdef void *ufunc_j1_ptr[4] +cdef void *ufunc_j1_data[2] +cdef char ufunc_j1_types[4] +cdef char *ufunc_j1_doc = ( + "j1(x, out=None)\n" + "\n" + "Bessel function of the first kind of order 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "J : scalar or ndarray\n" + " Value of the Bessel function of the first kind of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "jv: Bessel function of the first kind\n" + "spherical_jn: spherical Bessel functions.\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 8] and (8, infinity). In the\n" + "first interval a 24 term Chebyshev expansion is used. In the second, the\n" + "asymptotic trigonometric representation is employed using two rational\n" + "functions of degree 5/5.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `j1`.\n" + "It should not be confused with the spherical Bessel functions (see\n" + "`spherical_jn`).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import j1\n" + ">>> j1(1.)\n" + "0.44005058574493355\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> j1(np.array([-2., 0., 4.]))\n" + "array([-0.57672481, 0. 
, -0.06604333])\n" + "\n" + "Plot the function from -20 to 20.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-20., 20., 1000)\n" + ">>> y = j1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_j1_loops[0] = loop_d_d__As_f_f +ufunc_j1_loops[1] = loop_d_d__As_d_d +ufunc_j1_types[0] = NPY_FLOAT +ufunc_j1_types[1] = NPY_FLOAT +ufunc_j1_types[2] = NPY_DOUBLE +ufunc_j1_types[3] = NPY_DOUBLE +ufunc_j1_ptr[2*0] = _func_cephes_j1 +ufunc_j1_ptr[2*0+1] = ("j1") +ufunc_j1_ptr[2*1] = _func_cephes_j1 +ufunc_j1_ptr[2*1+1] = ("j1") +ufunc_j1_data[0] = &ufunc_j1_ptr[2*0] +ufunc_j1_data[1] = &ufunc_j1_ptr[2*1] +j1 = np.PyUFunc_FromFuncAndData(ufunc_j1_loops, ufunc_j1_data, ufunc_j1_types, 2, 1, 1, 0, "j1", ufunc_j1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k0_loops[2] +cdef void *ufunc_k0_ptr[4] +cdef void *ufunc_k0_data[2] +cdef char ufunc_k0_types[4] +cdef char *ufunc_k0_doc = ( + "k0(x, out=None)\n" + "\n" + "Modified Bessel function of the second kind of order 0, :math:`K_0`.\n" + "\n" + "This function is also sometimes referred to as the modified Bessel\n" + "function of the third kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the modified Bessel function :math:`K_0` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k0e: Exponentially scaled modified Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k0`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import k0\n" + ">>> k0(1.)\n" + "0.42102443824070823\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> k0(np.array([0.5, 2., 3.]))\n" + "array([0.92441907, 0.11389387, 0.0347395 ])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k0_loops[0] = loop_d_d__As_f_f +ufunc_k0_loops[1] = loop_d_d__As_d_d +ufunc_k0_types[0] = NPY_FLOAT +ufunc_k0_types[1] = NPY_FLOAT +ufunc_k0_types[2] = NPY_DOUBLE +ufunc_k0_types[3] = NPY_DOUBLE +ufunc_k0_ptr[2*0] = _func_cephes_k0 +ufunc_k0_ptr[2*0+1] = ("k0") +ufunc_k0_ptr[2*1] = _func_cephes_k0 +ufunc_k0_ptr[2*1+1] = ("k0") +ufunc_k0_data[0] = &ufunc_k0_ptr[2*0] +ufunc_k0_data[1] = &ufunc_k0_ptr[2*1] +k0 = np.PyUFunc_FromFuncAndData(ufunc_k0_loops, ufunc_k0_data, ufunc_k0_types, 2, 1, 1, 0, "k0", ufunc_k0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k0e_loops[2] +cdef void *ufunc_k0e_ptr[4] +cdef void *ufunc_k0e_data[2] +cdef char ufunc_k0e_types[4] +cdef char *ufunc_k0e_doc = ( + "k0e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function K of order 0\n" + "\n" + "Defined as::\n" + "\n" + " k0e(x) = exp(x) * k0(x).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function K of order\n" + " 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k0: Modified Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k0e`. `k0e` is\n" + "useful for large arguments: for these, `k0` easily underflows.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `k0` returns 0 whereas `k0e` still returns a\n" + "useful finite number:\n" + "\n" + ">>> from scipy.special import k0, k0e\n" + ">>> k0(1000.), k0e(1000)\n" + "(0., 0.03962832160075422)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> k0e(np.array([0.5, 2., 3.]))\n" + "array([1.52410939, 0.84156822, 0.6977616 ])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k0e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k0e_loops[0] = loop_d_d__As_f_f +ufunc_k0e_loops[1] = loop_d_d__As_d_d +ufunc_k0e_types[0] = NPY_FLOAT +ufunc_k0e_types[1] = NPY_FLOAT +ufunc_k0e_types[2] = NPY_DOUBLE +ufunc_k0e_types[3] = NPY_DOUBLE +ufunc_k0e_ptr[2*0] = _func_cephes_k0e +ufunc_k0e_ptr[2*0+1] = ("k0e") +ufunc_k0e_ptr[2*1] = _func_cephes_k0e +ufunc_k0e_ptr[2*1+1] = ("k0e") +ufunc_k0e_data[0] = &ufunc_k0e_ptr[2*0] +ufunc_k0e_data[1] = &ufunc_k0e_ptr[2*1] +k0e = np.PyUFunc_FromFuncAndData(ufunc_k0e_loops, ufunc_k0e_data, ufunc_k0e_types, 2, 1, 1, 0, "k0e", ufunc_k0e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k1_loops[2] +cdef void *ufunc_k1_ptr[4] +cdef void *ufunc_k1_data[2] +cdef char ufunc_k1_types[4] +cdef char *ufunc_k1_doc = ( + "k1(x, out=None)\n" + "\n" + "Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the modified Bessel function K of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k1e: Exponentially scaled modified Bessel function K of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k1`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import k1\n" + ">>> k1(1.)\n" + "0.6019072301972346\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> k1(np.array([0.5, 2., 3.]))\n" + "array([1.65644112, 0.13986588, 0.04015643])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k1_loops[0] = loop_d_d__As_f_f +ufunc_k1_loops[1] = loop_d_d__As_d_d +ufunc_k1_types[0] = NPY_FLOAT +ufunc_k1_types[1] = NPY_FLOAT +ufunc_k1_types[2] = NPY_DOUBLE +ufunc_k1_types[3] = NPY_DOUBLE +ufunc_k1_ptr[2*0] = _func_cephes_k1 +ufunc_k1_ptr[2*0+1] = ("k1") +ufunc_k1_ptr[2*1] = _func_cephes_k1 +ufunc_k1_ptr[2*1+1] = ("k1") +ufunc_k1_data[0] = &ufunc_k1_ptr[2*0] +ufunc_k1_data[1] = &ufunc_k1_ptr[2*1] +k1 = np.PyUFunc_FromFuncAndData(ufunc_k1_loops, ufunc_k1_data, ufunc_k1_types, 2, 1, 1, 0, "k1", ufunc_k1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k1e_loops[2] +cdef void *ufunc_k1e_ptr[4] +cdef void *ufunc_k1e_data[2] +cdef char ufunc_k1e_types[4] +cdef char *ufunc_k1e_doc = ( + "k1e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function K of order 1\n" + "\n" + "Defined as::\n" + "\n" + " k1e(x) = exp(x) * k1(x)\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function K of order\n" + " 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k1: Modified Bessel function of the second kind of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k1e`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `k1` returns 0 whereas `k1e` still returns a\n" + "useful floating point number.\n" + "\n" + ">>> from scipy.special import k1, k1e\n" + ">>> k1(1000.), k1e(1000.)\n" + "(0., 0.03964813081296021)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> k1e(np.array([0.5, 2., 3.]))\n" + "array([2.73100971, 1.03347685, 0.80656348])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k1e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k1e_loops[0] = loop_d_d__As_f_f +ufunc_k1e_loops[1] = loop_d_d__As_d_d +ufunc_k1e_types[0] = NPY_FLOAT +ufunc_k1e_types[1] = NPY_FLOAT +ufunc_k1e_types[2] = NPY_DOUBLE +ufunc_k1e_types[3] = NPY_DOUBLE +ufunc_k1e_ptr[2*0] = _func_cephes_k1e +ufunc_k1e_ptr[2*0+1] = ("k1e") +ufunc_k1e_ptr[2*1] = _func_cephes_k1e +ufunc_k1e_ptr[2*1+1] = ("k1e") +ufunc_k1e_data[0] = &ufunc_k1e_ptr[2*0] +ufunc_k1e_data[1] = &ufunc_k1e_ptr[2*1] +k1e = np.PyUFunc_FromFuncAndData(ufunc_k1e_loops, ufunc_k1e_data, ufunc_k1e_types, 2, 1, 1, 0, "k1e", ufunc_k1e_doc, 0) +
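+# Every ufunc in this file is wired up the same way: the *_loops table holds
+# one compiled inner loop per type signature, *_types lists the NumPy dtypes
+# each loop consumes and produces, and *_data points each loop at its C
+# implementation. A rough pure-Python analogue of the `kl_div` entry
+# registered below (an illustrative sketch only; np.frompyfunc builds an
+# object-dtype ufunc rather than the compiled double-precision loops used
+# here):
+#
+# >>> import numpy as np
+# >>> kl = np.frompyfunc(
+# ...     lambda x, y: x * np.log(x / y) - x + y if x > 0 and y > 0
+# ...     else (y if x == 0 and y >= 0 else np.inf),
+# ...     2, 1)  # two inputs, one output, matching kl_div's signature
+# >>> float(kl(1.0, 2.0))  # 1*log(1/2) - 1 + 2
+# 0.3068528194400547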
+cdef np.PyUFuncGenericFunction ufunc_kl_div_loops[2] +cdef void *ufunc_kl_div_ptr[4] +cdef void *ufunc_kl_div_data[2] +cdef char ufunc_kl_div_types[6] +cdef char *ufunc_kl_div_doc = ( + "kl_div(x, y, out=None)\n" + "\n" + "Elementwise function for computing Kullback-Leibler divergence.\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{kl\\_div}(x, y) =\n" + " \\begin{cases}\n" + " x \\log(x / y) - x + y & x > 0, y > 0 \\\\\n" + " y & x = 0, y \\ge 0 \\\\\n" + " \\infty & \\text{otherwise}\n" + " \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "x, y : array_like\n" + " Real arguments\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Kullback-Leibler divergence.\n" + "\n" + "See Also\n" + "--------\n" + "entr, rel_entr, scipy.stats.entropy\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.15.0\n" + "\n" + "This function is non-negative and is jointly convex in `x` and `y`.\n" + "\n" + "The origin of this function is in convex programming; see [1]_ for\n" + "details. This is why the function contains the extra :math:`-x\n" + "+ y` terms over what might be expected from the Kullback-Leibler\n" + "divergence. For a version of the function without the extra terms,\n" + "see `rel_entr`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n" + " Cambridge University Press, 2004.\n" + " :doi:`10.1017/CBO9780511804441`") +ufunc_kl_div_loops[0] = loop_d_dd__As_ff_f +ufunc_kl_div_loops[1] = loop_d_dd__As_dd_d +ufunc_kl_div_types[0] = NPY_FLOAT +ufunc_kl_div_types[1] = NPY_FLOAT +ufunc_kl_div_types[2] = NPY_FLOAT +ufunc_kl_div_types[3] = NPY_DOUBLE +ufunc_kl_div_types[4] = NPY_DOUBLE +ufunc_kl_div_types[5] = NPY_DOUBLE +ufunc_kl_div_ptr[2*0] = _func_kl_div +ufunc_kl_div_ptr[2*0+1] = ("kl_div") +ufunc_kl_div_ptr[2*1] = _func_kl_div +ufunc_kl_div_ptr[2*1+1] = ("kl_div") +ufunc_kl_div_data[0] = &ufunc_kl_div_ptr[2*0] +ufunc_kl_div_data[1] = &ufunc_kl_div_ptr[2*1] +kl_div = np.PyUFunc_FromFuncAndData(ufunc_kl_div_loops, ufunc_kl_div_data, ufunc_kl_div_types, 2, 2, 1, 0, "kl_div", ufunc_kl_div_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kn_loops[3] +cdef void *ufunc_kn_ptr[6] +cdef void *ufunc_kn_data[3] +cdef char ufunc_kn_types[9] +cdef char *ufunc_kn_doc = ( + "kn(n, x, out=None)\n" + "\n" + "Modified Bessel function of the second kind of integer order `n`\n" + "\n" + "Returns the modified Bessel function of the second kind for integer order\n" + "`n` at real `x`.\n" + "\n" + "These are also sometimes called functions of the third kind, Basset\n" + "functions, or Macdonald functions.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like of int\n" + " Order of Bessel functions (floats will truncate with a warning)\n" + "x : array_like of float\n" + " Argument at which to evaluate the Bessel functions\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the Modified Bessel function of the second kind,\n" + " :math:`K_n(x)`.\n" + "\n" + "See Also\n" + "--------\n" + "kv : Same function, but accepts real order and complex argument\n" + "kvp : Derivative of this function\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the\n" + "algorithm used, see [2]_ and the references therein.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n" + " functions of a complex argument and nonnegative order\", ACM\n" + " TOMS Vol. 12 Issue 3, Sept. 1986, p. 265\n" + "\n" + "Examples\n" + "--------\n" + "Plot the function of several orders for real input:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import kn\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(0, 5, 1000)\n" + ">>> for N in range(6):\n" + "... 
plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))\n" + ">>> plt.ylim(0, 10)\n" + ">>> plt.legend()\n" + ">>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')\n" + ">>> plt.show()\n" + "\n" + "Calculate for a single value at multiple orders:\n" + "\n" + ">>> kn([4, 5, 6], 1)\n" + "array([ 44.23241585, 360.9605896 , 3653.83831186])") +ufunc_kn_loops[0] = loop_d_pd__As_pd_d +ufunc_kn_loops[1] = loop_d_dd__As_ff_f +ufunc_kn_loops[2] = loop_d_dd__As_dd_d +ufunc_kn_types[0] = NPY_INTP +ufunc_kn_types[1] = NPY_DOUBLE +ufunc_kn_types[2] = NPY_DOUBLE +ufunc_kn_types[3] = NPY_FLOAT +ufunc_kn_types[4] = NPY_FLOAT +ufunc_kn_types[5] = NPY_FLOAT +ufunc_kn_types[6] = NPY_DOUBLE +ufunc_kn_types[7] = NPY_DOUBLE +ufunc_kn_types[8] = NPY_DOUBLE +ufunc_kn_ptr[2*0] = _func_special_cyl_bessel_k_int +ufunc_kn_ptr[2*0+1] = ("kn") +ufunc_kn_ptr[2*1] = _func_kn_unsafe +ufunc_kn_ptr[2*1+1] = ("kn") +ufunc_kn_ptr[2*2] = _func_kn_unsafe +ufunc_kn_ptr[2*2+1] = ("kn") +ufunc_kn_data[0] = &ufunc_kn_ptr[2*0] +ufunc_kn_data[1] = &ufunc_kn_ptr[2*1] +ufunc_kn_data[2] = &ufunc_kn_ptr[2*2] +kn = np.PyUFunc_FromFuncAndData(ufunc_kn_loops, ufunc_kn_data, ufunc_kn_types, 3, 2, 1, 0, "kn", ufunc_kn_doc, 0) +
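+# A small consistency check on the section above (hypothetical, not part of
+# the generated file): for integer orders `kn` agrees with the real-order
+# `kv`, and both satisfy the recurrence K_{n+1}(x) = K_{n-1}(x) + (2n/x) K_n(x):
+#
+# >>> from scipy.special import k0, k1, kn, kv
+# >>> k0(1.) + 2 * k1(1.)  # recurrence with n = 1 gives K_2(1)
+# 1.6248388986351774
+# >>> float(kn(2, 1.)), float(kv(2, 1.))
+# (1.6248388986351774, 1.6248388986351774)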
])") +ufunc_kolmogi_loops[0] = loop_d_d__As_f_f +ufunc_kolmogi_loops[1] = loop_d_d__As_d_d +ufunc_kolmogi_types[0] = NPY_FLOAT +ufunc_kolmogi_types[1] = NPY_FLOAT +ufunc_kolmogi_types[2] = NPY_DOUBLE +ufunc_kolmogi_types[3] = NPY_DOUBLE +ufunc_kolmogi_ptr[2*0] = _func_cephes_kolmogi +ufunc_kolmogi_ptr[2*0+1] = ("kolmogi") +ufunc_kolmogi_ptr[2*1] = _func_cephes_kolmogi +ufunc_kolmogi_ptr[2*1+1] = ("kolmogi") +ufunc_kolmogi_data[0] = &ufunc_kolmogi_ptr[2*0] +ufunc_kolmogi_data[1] = &ufunc_kolmogi_ptr[2*1] +kolmogi = np.PyUFunc_FromFuncAndData(ufunc_kolmogi_loops, ufunc_kolmogi_data, ufunc_kolmogi_types, 2, 1, 1, 0, "kolmogi", ufunc_kolmogi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kolmogorov_loops[2] +cdef void *ufunc_kolmogorov_ptr[4] +cdef void *ufunc_kolmogorov_data[2] +cdef char ufunc_kolmogorov_types[4] +cdef char *ufunc_kolmogorov_doc = ( + "kolmogorov(y, out=None)\n" + "\n" + "Complementary cumulative distribution (Survival Function) function of\n" + "Kolmogorov distribution.\n" + "\n" + "Returns the complementary cumulative distribution function of\n" + "Kolmogorov's limiting distribution (``D_n*\\sqrt(n)`` as n goes to infinity)\n" + "of a two-sided test for equality between an empirical and a theoretical\n" + "distribution. It is equal to the (limit as n->infinity of the)\n" + "probability that ``sqrt(n) * max absolute deviation > y``.\n" + "\n" + "Parameters\n" + "----------\n" + "y : float array_like\n" + " Absolute deviation between the Empirical CDF (ECDF) and the target CDF,\n" + " multiplied by sqrt(n).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of kolmogorov(y)\n" + "\n" + "See Also\n" + "--------\n" + "kolmogi : The Inverse Survival Function for the distribution\n" + "scipy.stats.kstwobign : Provides the functionality as a continuous distribution\n" + "smirnov, smirnovi : Functions for the one-sided distribution\n" + "\n" + "Notes\n" + "-----\n" + "`kolmogorov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this\n" + "function is exposed in `scpy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.kstwobign` distribution.\n" + "\n" + "Examples\n" + "--------\n" + "Show the probability of a gap at least as big as 0, 0.5 and 1.0.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import kolmogorov\n" + ">>> from scipy.stats import kstwobign\n" + ">>> kolmogorov([0, 0.5, 1.0])\n" + "array([ 1. 
, 0.96394524, 0.26999967])\n" + "\n" + "Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against\n" + "the target distribution, a Normal(0, 1) distribution.\n" + "\n" + ">>> from scipy.stats import norm, laplace\n" + ">>> rng = np.random.default_rng()\n" + ">>> n = 1000\n" + ">>> lap01 = laplace(0, 1)\n" + ">>> x = np.sort(lap01.rvs(n, random_state=rng))\n" + ">>> np.mean(x), np.std(x)\n" + "(-0.05841730131499543, 1.3968109101997568)\n" + "\n" + "Construct the Empirical CDF and the K-S statistic Dn.\n" + "\n" + ">>> target = norm(0,1) # Normal mean 0, stddev 1\n" + ">>> cdfs = target.cdf(x)\n" + ">>> ecdfs = np.arange(n+1, dtype=float)/n\n" + ">>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])\n" + ">>> Dn = np.max(gaps)\n" + ">>> Kn = np.sqrt(n) * Dn\n" + ">>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))\n" + "Dn=0.043363, sqrt(n)*Dn=1.371265\n" + ">>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',\n" + "... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' %\n" + "... (Kn, kolmogorov(Kn)),\n" + "... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' %\n" + "... (Kn, kstwobign.cdf(Kn))]))\n" + "For a sample of size n drawn from a N(0, 1) distribution:\n" + " the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533\n" + " the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467\n" + "\n" + "Plot the Empirical CDF against the target N(0, 1) CDF.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')\n" + ">>> x3 = np.linspace(-3, 3, 100)\n" + ">>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')\n" + ">>> plt.ylim([0, 1]); plt.grid(True); plt.legend();\n" + ">>> # Add vertical lines marking Dn+ and Dn-\n" + ">>> iminus, iplus = np.argmax(gaps, axis=0)\n" + ">>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus],\n" + "... color='r', linestyle='dashed', lw=4)\n" + ">>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1],\n" + "... 
color='r', linestyle='dashed', lw=4)\n" + ">>> plt.show()") +ufunc_kolmogorov_loops[0] = loop_d_d__As_f_f +ufunc_kolmogorov_loops[1] = loop_d_d__As_d_d +ufunc_kolmogorov_types[0] = NPY_FLOAT +ufunc_kolmogorov_types[1] = NPY_FLOAT +ufunc_kolmogorov_types[2] = NPY_DOUBLE +ufunc_kolmogorov_types[3] = NPY_DOUBLE +ufunc_kolmogorov_ptr[2*0] = _func_cephes_kolmogorov +ufunc_kolmogorov_ptr[2*0+1] = ("kolmogorov") +ufunc_kolmogorov_ptr[2*1] = _func_cephes_kolmogorov +ufunc_kolmogorov_ptr[2*1+1] = ("kolmogorov") +ufunc_kolmogorov_data[0] = &ufunc_kolmogorov_ptr[2*0] +ufunc_kolmogorov_data[1] = &ufunc_kolmogorov_ptr[2*1] +kolmogorov = np.PyUFunc_FromFuncAndData(ufunc_kolmogorov_loops, ufunc_kolmogorov_data, ufunc_kolmogorov_types, 2, 1, 1, 0, "kolmogorov", ufunc_kolmogorov_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_log1p_loops[4] +cdef void *ufunc_log1p_ptr[8] +cdef void *ufunc_log1p_data[4] +cdef char ufunc_log1p_types[8] +cdef char *ufunc_log1p_doc = ( + "log1p(x, out=None)\n" + "\n" + "Calculates log(1 + x) for use when `x` is near zero.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued input.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of ``log(1 + x)``.\n" + "\n" + "See Also\n" + "--------\n" + "expm1, cosm1\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using ``log(1 + x)`` directly for ``x``\n" + "near 0. Note that in the below example ``1 + 1e-17 == 1`` to\n" + "double precision.\n" + "\n" + ">>> sc.log1p(1e-17)\n" + "1e-17\n" + ">>> np.log(1 + 1e-17)\n" + "0.0") +ufunc_log1p_loops[0] = loop_d_d__As_f_f +ufunc_log1p_loops[1] = loop_d_d__As_d_d +ufunc_log1p_loops[2] = loop_D_D__As_F_F +ufunc_log1p_loops[3] = loop_D_D__As_D_D +ufunc_log1p_types[0] = NPY_FLOAT +ufunc_log1p_types[1] = NPY_FLOAT +ufunc_log1p_types[2] = NPY_DOUBLE +ufunc_log1p_types[3] = NPY_DOUBLE +ufunc_log1p_types[4] = NPY_CFLOAT +ufunc_log1p_types[5] = NPY_CFLOAT +ufunc_log1p_types[6] = NPY_CDOUBLE +ufunc_log1p_types[7] = NPY_CDOUBLE +ufunc_log1p_ptr[2*0] = _func_cephes_log1p +ufunc_log1p_ptr[2*0+1] = ("log1p") +ufunc_log1p_ptr[2*1] = _func_cephes_log1p +ufunc_log1p_ptr[2*1+1] = ("log1p") +ufunc_log1p_ptr[2*2] = _func_clog1p +ufunc_log1p_ptr[2*2+1] = ("log1p") +ufunc_log1p_ptr[2*3] = _func_clog1p +ufunc_log1p_ptr[2*3+1] = ("log1p") +ufunc_log1p_data[0] = &ufunc_log1p_ptr[2*0] +ufunc_log1p_data[1] = &ufunc_log1p_ptr[2*1] +ufunc_log1p_data[2] = &ufunc_log1p_ptr[2*2] +ufunc_log1p_data[3] = &ufunc_log1p_ptr[2*3] +log1p = np.PyUFunc_FromFuncAndData(ufunc_log1p_loops, ufunc_log1p_data, ufunc_log1p_types, 4, 1, 1, 0, "log1p", ufunc_log1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_log_ndtr_loops[4] +cdef void *ufunc_log_ndtr_ptr[8] +cdef void *ufunc_log_ndtr_data[4] +cdef char ufunc_log_ndtr_types[8] +cdef char *ufunc_log_ndtr_doc = ( + "log_ndtr(x, out=None)\n" + "\n" + "Logarithm of Gaussian cumulative distribution function.\n" + "\n" + "Returns the log of the area under the standard Gaussian probability\n" + "density function, integrated from minus infinity to `x`::\n" + "\n" + " log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like, real or complex\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " 
The value of the log of the normal CDF evaluated at `x`\n" + "\n" + "See Also\n" + "--------\n" + "erf\n" + "erfc\n" + "scipy.stats.norm\n" + "ndtr\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import log_ndtr, ndtr\n" + "\n" + "The benefit of ``log_ndtr(x)`` over the naive implementation\n" + "``np.log(ndtr(x))`` is most evident with moderate to large positive\n" + "values of ``x``:\n" + "\n" + ">>> x = np.array([6, 7, 9, 12, 15, 25])\n" + ">>> log_ndtr(x)\n" + "array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019,\n" + " -1.77648211e-033, -3.67096620e-051, -3.05669671e-138])\n" + "\n" + "The results of the naive calculation for the moderate ``x`` values\n" + "have only 5 or 6 correct significant digits. For values of ``x``\n" + "greater than approximately 8.3, the naive expression returns 0:\n" + "\n" + ">>> np.log(ndtr(x))\n" + "array([-9.86587701e-10, -1.27986510e-12, 0.00000000e+00,\n" + " 0.00000000e+00, 0.00000000e+00, 0.00000000e+00])") +ufunc_log_ndtr_loops[0] = loop_d_d__As_f_f +ufunc_log_ndtr_loops[1] = loop_d_d__As_d_d +ufunc_log_ndtr_loops[2] = loop_D_D__As_F_F +ufunc_log_ndtr_loops[3] = loop_D_D__As_D_D +ufunc_log_ndtr_types[0] = NPY_FLOAT +ufunc_log_ndtr_types[1] = NPY_FLOAT +ufunc_log_ndtr_types[2] = NPY_DOUBLE +ufunc_log_ndtr_types[3] = NPY_DOUBLE +ufunc_log_ndtr_types[4] = NPY_CFLOAT +ufunc_log_ndtr_types[5] = NPY_CFLOAT +ufunc_log_ndtr_types[6] = NPY_CDOUBLE +ufunc_log_ndtr_types[7] = NPY_CDOUBLE +ufunc_log_ndtr_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr +ufunc_log_ndtr_ptr[2*0+1] = ("log_ndtr") +ufunc_log_ndtr_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr +ufunc_log_ndtr_ptr[2*1+1] = ("log_ndtr") +ufunc_log_ndtr_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex +ufunc_log_ndtr_ptr[2*2+1] = ("log_ndtr") +ufunc_log_ndtr_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex +ufunc_log_ndtr_ptr[2*3+1] = ("log_ndtr") +ufunc_log_ndtr_data[0] = &ufunc_log_ndtr_ptr[2*0] +ufunc_log_ndtr_data[1] = &ufunc_log_ndtr_ptr[2*1] +ufunc_log_ndtr_data[2] = &ufunc_log_ndtr_ptr[2*2] +ufunc_log_ndtr_data[3] = &ufunc_log_ndtr_ptr[2*3] +log_ndtr = np.PyUFunc_FromFuncAndData(ufunc_log_ndtr_loops, ufunc_log_ndtr_data, ufunc_log_ndtr_types, 4, 1, 1, 0, "log_ndtr", ufunc_log_ndtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_lpmv_loops[2] +cdef void *ufunc_lpmv_ptr[4] +cdef void *ufunc_lpmv_data[2] +cdef char ufunc_lpmv_types[8] +cdef char *ufunc_lpmv_doc = ( + "lpmv(m, v, x, out=None)\n" + "\n" + "Associated Legendre function of integer order and real degree.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " P_v^m = (-1)^m (1 - x^2)^{m/2} \\frac{d^m}{dx^m} P_v(x)\n" + "\n" + "where\n" + "\n" + ".. math::\n" + "\n" + " P_v = \\sum_{k = 0}^\\infty \\frac{(-v)_k (v + 1)_k}{(k!)^2}\n" + " \\left(\\frac{1 - x}{2}\\right)^k\n" + "\n" + "is the Legendre function of the first kind. Here :math:`(\\cdot)_k`\n" + "is the Pochhammer symbol; see `poch`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order (int or float). If passed a float not equal to an\n" + " integer the function returns NaN.\n" + "v : array_like\n" + " Degree (float).\n" + "x : array_like\n" + " Argument (float). 
Must have ``|x| <= 1``.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "pmv : scalar or ndarray\n" + " Value of the associated Legendre function.\n" + "\n" + "See Also\n" + "--------\n" + "lpmn : Compute the associated Legendre function for all orders\n" + " ``0, ..., m`` and degrees ``0, ..., n``.\n" + "clpmn : Compute the associated Legendre function at complex\n" + " arguments.\n" + "\n" + "Notes\n" + "-----\n" + "Note that this implementation includes the Condon-Shortley phase.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Zhang, Jin, \"Computation of Special Functions\", John Wiley\n" + " and Sons, Inc, 1996.") +ufunc_lpmv_loops[0] = loop_d_ddd__As_fff_f +ufunc_lpmv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_lpmv_types[0] = NPY_FLOAT +ufunc_lpmv_types[1] = NPY_FLOAT +ufunc_lpmv_types[2] = NPY_FLOAT +ufunc_lpmv_types[3] = NPY_FLOAT +ufunc_lpmv_types[4] = NPY_DOUBLE +ufunc_lpmv_types[5] = NPY_DOUBLE +ufunc_lpmv_types[6] = NPY_DOUBLE +ufunc_lpmv_types[7] = NPY_DOUBLE +ufunc_lpmv_ptr[2*0] = _func_pmv_wrap +ufunc_lpmv_ptr[2*0+1] = ("lpmv") +ufunc_lpmv_ptr[2*1] = _func_pmv_wrap +ufunc_lpmv_ptr[2*1+1] = ("lpmv") +ufunc_lpmv_data[0] = &ufunc_lpmv_ptr[2*0] +ufunc_lpmv_data[1] = &ufunc_lpmv_ptr[2*1] +lpmv = np.PyUFunc_FromFuncAndData(ufunc_lpmv_loops, ufunc_lpmv_data, ufunc_lpmv_types, 2, 3, 1, 0, "lpmv", ufunc_lpmv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_modstruve_loops[2] +cdef void *ufunc_modstruve_ptr[4] +cdef void *ufunc_modstruve_data[2] +cdef char ufunc_modstruve_types[6] +cdef char *ufunc_modstruve_doc = ( + "modstruve(v, x, out=None)\n" + "\n" + "Modified Struve function.\n" + "\n" + "Return the value of the modified Struve function of order `v` at `x`. The\n" + "modified Struve function is defined as,\n" + "\n" + ".. math::\n" + " L_v(x) = -\\imath \\exp(-\\pi\\imath v/2) H_v(\\imath x),\n" + "\n" + "where :math:`H_v` is the Struve function.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order of the modified Struve function (float).\n" + "x : array_like\n" + " Argument of the Struve function (float; must be positive unless `v` is\n" + " an integer).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "L : scalar or ndarray\n" + " Value of the modified Struve function of order `v` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "struve\n" + "\n" + "Notes\n" + "-----\n" + "Three methods discussed in [1]_ are used to evaluate the function:\n" + "\n" + "- power series\n" + "- expansion in Bessel functions (if :math:`|x| < |v| + 20`)\n" + "- asymptotic large-x expansion (if :math:`x \\geq 0.7v + 12`)\n" + "\n" + "Rounding errors are estimated based on the largest terms in the sums, and\n" + "the result associated with the smallest error is returned.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/11\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the modified Struve function of order 1 at 2.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import modstruve\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> modstruve(1, 2.)\n" + "1.102759787367716\n" + "\n" + "Calculate the modified Struve function at 2 for orders 1, 2 and 3 by\n" + "providing a list for the order parameter `v`.\n" + "\n" + ">>> modstruve([1, 2, 3], 2.)\n" + "array([1.10275979, 0.41026079, 0.11247294])\n" + "\n" + "Calculate the modified Struve function of order 1 for several points\n" + "by providing an array for `x`.\n" + "\n" + ">>> points = np.array([2., 5., 8.])\n" + ">>> modstruve(1, points)\n" + "array([ 1.10275979, 23.72821578, 399.24709139])\n" + "\n" + "Compute the modified Struve function for several orders at several\n" + "points by providing arrays for `v` and `x`. The arrays have to be\n" + "broadcastable to the correct shapes.\n" + "\n" + ">>> orders = np.array([[1], [2], [3]])\n" + ">>> points.shape, orders.shape\n" + "((3,), (3, 1))\n" + "\n" + ">>> modstruve(orders, points)\n" + "array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02],\n" + " [4.10260789e-01, 1.65535979e+01, 3.25973609e+02],\n" + " [1.12472937e-01, 9.42430454e+00, 2.33544042e+02]])\n" + "\n" + "Plot the modified Struve functions of order 0 to 3 from -5 to 5.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-5., 5., 1000)\n" + ">>> for i in range(4):\n" + "... ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$')\n" + ">>> ax.legend(ncol=2)\n" + ">>> ax.set_xlim(-5, 5)\n" + ">>> ax.set_title(r\"Modified Struve functions $L_{\\nu}$\")\n" + ">>> plt.show()") +ufunc_modstruve_loops[0] = loop_d_dd__As_ff_f +ufunc_modstruve_loops[1] = loop_d_dd__As_dd_d +ufunc_modstruve_types[0] = NPY_FLOAT +ufunc_modstruve_types[1] = NPY_FLOAT +ufunc_modstruve_types[2] = NPY_FLOAT +ufunc_modstruve_types[3] = NPY_DOUBLE +ufunc_modstruve_types[4] = NPY_DOUBLE +ufunc_modstruve_types[5] = NPY_DOUBLE +ufunc_modstruve_ptr[2*0] = _func_cephes_struve_l +ufunc_modstruve_ptr[2*0+1] = ("modstruve") +ufunc_modstruve_ptr[2*1] = _func_cephes_struve_l +ufunc_modstruve_ptr[2*1+1] = ("modstruve") +ufunc_modstruve_data[0] = &ufunc_modstruve_ptr[2*0] +ufunc_modstruve_data[1] = &ufunc_modstruve_ptr[2*1] +modstruve = np.PyUFunc_FromFuncAndData(ufunc_modstruve_loops, ufunc_modstruve_data, ufunc_modstruve_types, 2, 2, 1, 0, "modstruve", ufunc_modstruve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtr_loops[3] +cdef void *ufunc_nbdtr_ptr[6] +cdef void *ufunc_nbdtr_data[3] +cdef char ufunc_nbdtr_types[12] +cdef char *ufunc_nbdtr_doc = ( + "nbdtr(k, n, p, out=None)\n" + "\n" + "Negative binomial cumulative distribution function.\n" + "\n" + "Returns the sum of the terms 0 through `k` of the negative binomial\n" + "distribution probability mass function,\n" + "\n" + ".. 
math::\n" + "\n" + " F = \\sum_{j=0}^k {{n + j - 1}\\choose{j}} p^n (1 - p)^j.\n" + "\n" + "In a sequence of Bernoulli trials with individual success probabilities\n" + "`p`, this is the probability that `k` or fewer failures precede the nth\n" + "success.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "n : array_like\n" + " The target number of successes (positive int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The probability of `k` or fewer failures before `n` successes in a\n" + " sequence of events with individual success probability `p`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtrc : Negative binomial survival function\n" + "nbdtrik : Negative binomial quantile function\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "If floating point values are passed for `k` or `n`, they will be truncated\n" + "to integers.\n" + "\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `nbdtr`.\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtr` directly can improve performance\n" + "compared to the ``cdf`` method of `scipy.stats.nbinom` (see last example).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtr\n" + ">>> nbdtr(10, 5, 0.5)\n" + "0.940765380859375\n" + "\n" + "Compute the function for ``n=10`` and ``p=0.5`` at several points by\n" + "providing a NumPy array or list for `k`.\n" + "\n" + ">>> nbdtr([5, 10, 15], 10, 0.5)\n" + "array([0.15087891, 0.58809853, 0.88523853])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> k = np.arange(130)\n" + ">>> n_parameters = [20, 20, 20, 80]\n" + ">>> p_parameters = [0.2, 0.5, 0.8, 0.5]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(p_parameters, n_parameters,\n" + "... linestyles))\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... p, n, style = parameter_set\n" + "... nbdtr_vals = nbdtr(k, n, p)\n" + "... ax.plot(k, nbdtr_vals, label=rf\"$n={n},\\, p={p}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$k$\")\n" + ">>> ax.set_title(\"Negative binomial cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtr` directly can be much faster than\n" + "calling the ``cdf`` method of `scipy.stats.nbinom`, especially for small\n" + "arrays or individual values. 
To get the same results one must use the\n" + "following parametrization: ``nbinom(n, p).cdf(k)=nbdtr(k, n, p)``.\n" + "\n" + ">>> from scipy.stats import nbinom\n" + ">>> k, n, p = 5, 3, 0.5\n" + ">>> nbdtr_res = nbdtr(k, n, p) # this will often be faster than below\n" + ">>> stats_res = nbinom(n, p).cdf(k)\n" + ">>> stats_res, nbdtr_res # test that results are equal\n" + "(0.85546875, 0.85546875)\n" + "\n" + "`nbdtr` can evaluate different parameter sets by providing arrays with\n" + "shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute\n" + "the function for three different `k` at four locations `p`, resulting in\n" + "a 3x4 array.\n" + "\n" + ">>> k = np.array([[5], [10], [15]])\n" + ">>> p = np.array([0.3, 0.5, 0.7, 0.9])\n" + ">>> k.shape, p.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> nbdtr(k, 5, p)\n" + "array([[0.15026833, 0.62304687, 0.95265101, 0.9998531 ],\n" + " [0.48450894, 0.94076538, 0.99932777, 0.99999999],\n" + " [0.76249222, 0.99409103, 0.99999445, 1. ]])") +ufunc_nbdtr_loops[0] = loop_d_ppd__As_ppd_d +ufunc_nbdtr_loops[1] = loop_d_ddd__As_fff_f +ufunc_nbdtr_loops[2] = loop_d_ddd__As_ddd_d +ufunc_nbdtr_types[0] = NPY_INTP +ufunc_nbdtr_types[1] = NPY_INTP +ufunc_nbdtr_types[2] = NPY_DOUBLE +ufunc_nbdtr_types[3] = NPY_DOUBLE +ufunc_nbdtr_types[4] = NPY_FLOAT +ufunc_nbdtr_types[5] = NPY_FLOAT +ufunc_nbdtr_types[6] = NPY_FLOAT +ufunc_nbdtr_types[7] = NPY_FLOAT +ufunc_nbdtr_types[8] = NPY_DOUBLE +ufunc_nbdtr_types[9] = NPY_DOUBLE +ufunc_nbdtr_types[10] = NPY_DOUBLE +ufunc_nbdtr_types[11] = NPY_DOUBLE +ufunc_nbdtr_ptr[2*0] = _func_cephes_nbdtr_wrap +ufunc_nbdtr_ptr[2*0+1] = ("nbdtr") +ufunc_nbdtr_ptr[2*1] = _func_nbdtr_unsafe +ufunc_nbdtr_ptr[2*1+1] = ("nbdtr") +ufunc_nbdtr_ptr[2*2] = _func_nbdtr_unsafe +ufunc_nbdtr_ptr[2*2+1] = ("nbdtr") +ufunc_nbdtr_data[0] = &ufunc_nbdtr_ptr[2*0] +ufunc_nbdtr_data[1] = &ufunc_nbdtr_ptr[2*1] +ufunc_nbdtr_data[2] = &ufunc_nbdtr_ptr[2*2] +nbdtr = np.PyUFunc_FromFuncAndData(ufunc_nbdtr_loops, ufunc_nbdtr_data, ufunc_nbdtr_types, 3, 3, 1, 0, "nbdtr", ufunc_nbdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtrc_loops[3] +cdef void *ufunc_nbdtrc_ptr[6] +cdef void *ufunc_nbdtrc_data[3] +cdef char ufunc_nbdtrc_types[12] +cdef char *ufunc_nbdtrc_doc = ( + "nbdtrc(k, n, p, out=None)\n" + "\n" + "Negative binomial survival function.\n" + "\n" + "Returns the sum of the terms `k + 1` to infinity of the negative binomial\n" + "distribution probability mass function,\n" + "\n" + ".. 
math::\n" + "\n" + " F = \\sum_{j=k + 1}^\\infty {{n + j - 1}\\choose{j}} p^n (1 - p)^j.\n" + "\n" + "In a sequence of Bernoulli trials with individual success probabilities\n" + "`p`, this is the probability that more than `k` failures precede the nth\n" + "success.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "n : array_like\n" + " The target number of successes (positive int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The probability of `k + 1` or more failures before `n` successes in a\n" + " sequence of events with individual success probability `p`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Negative binomial cumulative distribution function\n" + "nbdtrik : Negative binomial percentile function\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "If floating point values are passed for `k` or `n`, they will be truncated\n" + "to integers.\n" + "\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `nbdtrc`.\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtrc` directly can improve performance\n" + "compared to the ``sf`` method of `scipy.stats.nbinom` (see last example).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtrc\n" + ">>> nbdtrc(10, 5, 0.5)\n" + "0.059234619140624986\n" + "\n" + "Compute the function for ``n=10`` and ``p=0.5`` at several points by\n" + "providing a NumPy array or list for `k`.\n" + "\n" + ">>> nbdtrc([5, 10, 15], 10, 0.5)\n" + "array([0.84912109, 0.41190147, 0.11476147])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> k = np.arange(130)\n" + ">>> n_parameters = [20, 20, 20, 80]\n" + ">>> p_parameters = [0.2, 0.5, 0.8, 0.5]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(p_parameters, n_parameters,\n" + "... linestyles))\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... p, n, style = parameter_set\n" + "... nbdtrc_vals = nbdtrc(k, n, p)\n" + "... ax.plot(k, nbdtrc_vals, label=rf\"$n={n},\\, p={p}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$k$\")\n" + ">>> ax.set_title(\"Negative binomial distribution survival function\")\n" + ">>> plt.show()\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtrc` directly can be much faster than\n" + "calling the ``sf`` method of `scipy.stats.nbinom`, especially for small\n" + "arrays or individual values. 
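\n" + "\n" + "The identity with the regularized incomplete beta function stated in the\n" + "Notes offers a direct numerical cross-check via `betainc` from\n" + "`scipy.special`:\n" + "\n" + ">>> from scipy.special import betainc\n" + ">>> k, n, p = 10, 5, 0.5\n" + ">>> np.allclose(nbdtrc(k, n, p), betainc(k + 1, n, 1 - p))\n" + "True\n" + "\n" + "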
To get the same results one must use the\n" + "following parametrization: ``nbinom(n, p).sf(k)=nbdtrc(k, n, p)``.\n" + "\n" + ">>> from scipy.stats import nbinom\n" + ">>> k, n, p = 3, 5, 0.5\n" + ">>> nbdtrc_res = nbdtrc(k, n, p)  # this will often be faster than below\n" + ">>> stats_res = nbinom(n, p).sf(k)\n" + ">>> stats_res, nbdtrc_res  # test that results are equal\n" + "(0.6367187499999999, 0.6367187499999999)\n" + "\n" + "`nbdtrc` can evaluate different parameter sets by providing arrays with\n" + "shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute\n" + "the function for three different `k` at four locations `p`, resulting in\n" + "a 3x4 array.\n" + "\n" + ">>> k = np.array([[5], [10], [15]])\n" + ">>> p = np.array([0.3, 0.5, 0.7, 0.9])\n" + ">>> k.shape, p.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> nbdtrc(k, 5, p)\n" + "array([[8.49731667e-01, 3.76953125e-01, 4.73489874e-02, 1.46902600e-04],\n" + "       [5.15491059e-01, 5.92346191e-02, 6.72234070e-04, 9.29610100e-09],\n" + "       [2.37507779e-01, 5.90896606e-03, 5.55025308e-06, 3.26346760e-13]])")
+ufunc_nbdtrc_loops[0] = loop_d_ppd__As_ppd_d +ufunc_nbdtrc_loops[1] = loop_d_ddd__As_fff_f +ufunc_nbdtrc_loops[2] = loop_d_ddd__As_ddd_d +ufunc_nbdtrc_types[0] = NPY_INTP +ufunc_nbdtrc_types[1] = NPY_INTP +ufunc_nbdtrc_types[2] = NPY_DOUBLE +ufunc_nbdtrc_types[3] = NPY_DOUBLE +ufunc_nbdtrc_types[4] = NPY_FLOAT +ufunc_nbdtrc_types[5] = NPY_FLOAT +ufunc_nbdtrc_types[6] = NPY_FLOAT +ufunc_nbdtrc_types[7] = NPY_FLOAT +ufunc_nbdtrc_types[8] = NPY_DOUBLE +ufunc_nbdtrc_types[9] = NPY_DOUBLE +ufunc_nbdtrc_types[10] = NPY_DOUBLE +ufunc_nbdtrc_types[11] = NPY_DOUBLE +ufunc_nbdtrc_ptr[2*0] = _func_cephes_nbdtrc_wrap +ufunc_nbdtrc_ptr[2*0+1] = ("nbdtrc") +ufunc_nbdtrc_ptr[2*1] = _func_nbdtrc_unsafe +ufunc_nbdtrc_ptr[2*1+1] = ("nbdtrc") +ufunc_nbdtrc_ptr[2*2] = _func_nbdtrc_unsafe +ufunc_nbdtrc_ptr[2*2+1] = ("nbdtrc") +ufunc_nbdtrc_data[0] = &ufunc_nbdtrc_ptr[2*0] +ufunc_nbdtrc_data[1] = &ufunc_nbdtrc_ptr[2*1] +ufunc_nbdtrc_data[2] = &ufunc_nbdtrc_ptr[2*2] +nbdtrc = np.PyUFunc_FromFuncAndData(ufunc_nbdtrc_loops, ufunc_nbdtrc_data, ufunc_nbdtrc_types, 3, 3, 1, 0, "nbdtrc", ufunc_nbdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtri_loops[3] +cdef void *ufunc_nbdtri_ptr[6] +cdef void *ufunc_nbdtri_data[3] +cdef char ufunc_nbdtri_types[12] +cdef char *ufunc_nbdtri_doc = ( + "nbdtri(k, n, y, out=None)\n" + "\n" + "Returns the inverse with respect to the parameter `p` of\n" + "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n" + "function.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + "    The maximum number of allowed failures (nonnegative int).\n" + "n : array_like\n" + "    The target number of successes (positive int).\n" + "y : array_like\n" + "    The probability of `k` or fewer failures before `n` successes (float).\n" + "out : ndarray, optional\n" + "    Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "p : scalar or ndarray\n" + "    Probability of success in a single event (float) such that\n" + "    `nbdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Cumulative distribution function of the negative binomial.\n" + "nbdtrc : Negative binomial survival function.\n" + "nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.\n" + "nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper 
for the Cephes [1]_ routine `nbdtri`.\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtri` directly can improve performance\n" + "compared to the ``ppf`` method of `scipy.stats.nbinom`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + "       http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "`nbdtri` is the inverse of `nbdtr` with respect to `p`.\n" + "Up to floating point errors the following holds:\n" + "``nbdtri(k, n, nbdtr(k, n, p))=p``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtri, nbdtr\n" + ">>> k, n, p = 5, 10, 0.2\n" + ">>> cdf_val = nbdtr(k, n, p)\n" + ">>> nbdtri(k, n, cdf_val)\n" + "0.20000000000000004\n" + "\n" + "Compute the function for ``k=3`` and ``n=5`` at several points by\n" + "providing a NumPy array or list for `y`.\n" + "\n" + ">>> y = np.array([0.1, 0.4, 0.8])\n" + ">>> nbdtri(3, 5, y)\n" + "array([0.34462319, 0.51653095, 0.69677416])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> n_parameters = [5, 20, 30, 30]\n" + ">>> k_parameters = [20, 20, 60, 80]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(n_parameters, k_parameters, linestyles))\n" + ">>> cdf_vals = np.linspace(0, 1, 1000)\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "...     n, k, style = parameter_set\n" + "...     nbdtri_vals = nbdtri(k, n, cdf_vals)\n" + "...     ax.plot(cdf_vals, nbdtri_vals, label=rf\"$k={k},\\ n={n}$\",\n" + "...             ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_ylabel(\"$p$\")\n" + ">>> ax.set_xlabel(\"$CDF$\")\n" + ">>> title = \"nbdtri: inverse of negative binomial CDF with respect to $p$\"\n" + ">>> ax.set_title(title)\n" + ">>> plt.show()\n" + "\n" + 
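"Since ``nbdtr(k, n, p)`` equals the regularized incomplete beta function\n" + "``betainc(n, k + 1, p)``, the inverse with respect to `p` can be\n" + "cross-checked with `betaincinv` from `scipy.special`:\n" + "\n" + ">>> from scipy.special import betaincinv\n" + ">>> k, n, y = 5, 10, 0.5\n" + ">>> np.allclose(nbdtri(k, n, y), betaincinv(n, k + 1, y))\n" + "True\n" + "\n" + 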
"`nbdtri` can evaluate different parameter sets by providing arrays with\n" + "shapes compatible for broadcasting for `k`, `n` and `y`. Here we compute\n" + "the function for three different `k` at four locations `y`, resulting in\n" + "a 3x4 array.\n" + "\n" + ">>> k = np.array([[5], [10], [15]])\n" + ">>> y = np.array([0.3, 0.5, 0.7, 0.9])\n" + ">>> k.shape, y.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> nbdtri(k, 5, y)\n" + "array([[0.37258157, 0.45169416, 0.53249956, 0.64578407],\n" + "       [0.24588501, 0.30451981, 0.36778453, 0.46397088],\n" + "       [0.18362101, 0.22966758, 0.28054743, 0.36066188]])")
+ufunc_nbdtri_loops[0] = loop_d_ppd__As_ppd_d +ufunc_nbdtri_loops[1] = loop_d_ddd__As_fff_f +ufunc_nbdtri_loops[2] = loop_d_ddd__As_ddd_d +ufunc_nbdtri_types[0] = NPY_INTP +ufunc_nbdtri_types[1] = NPY_INTP +ufunc_nbdtri_types[2] = NPY_DOUBLE +ufunc_nbdtri_types[3] = NPY_DOUBLE +ufunc_nbdtri_types[4] = NPY_FLOAT +ufunc_nbdtri_types[5] = NPY_FLOAT +ufunc_nbdtri_types[6] = NPY_FLOAT +ufunc_nbdtri_types[7] = NPY_FLOAT +ufunc_nbdtri_types[8] = NPY_DOUBLE +ufunc_nbdtri_types[9] = NPY_DOUBLE +ufunc_nbdtri_types[10] = NPY_DOUBLE +ufunc_nbdtri_types[11] = NPY_DOUBLE +ufunc_nbdtri_ptr[2*0] = _func_cephes_nbdtri_wrap +ufunc_nbdtri_ptr[2*0+1] = ("nbdtri") +ufunc_nbdtri_ptr[2*1] = _func_nbdtri_unsafe +ufunc_nbdtri_ptr[2*1+1] = ("nbdtri") +ufunc_nbdtri_ptr[2*2] = _func_nbdtri_unsafe +ufunc_nbdtri_ptr[2*2+1] = ("nbdtri") +ufunc_nbdtri_data[0] = &ufunc_nbdtri_ptr[2*0] +ufunc_nbdtri_data[1] = &ufunc_nbdtri_ptr[2*1] +ufunc_nbdtri_data[2] = &ufunc_nbdtri_ptr[2*2] +nbdtri = np.PyUFunc_FromFuncAndData(ufunc_nbdtri_loops, ufunc_nbdtri_data, ufunc_nbdtri_types, 3, 3, 1, 0, "nbdtri", ufunc_nbdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtrik_loops[2] +cdef void *ufunc_nbdtrik_ptr[4] +cdef void *ufunc_nbdtrik_data[2] +cdef char ufunc_nbdtrik_types[8] +cdef char *ufunc_nbdtrik_doc = ( + "nbdtrik(y, n, p, out=None)\n" + "\n" + "Negative binomial percentile function.\n" + "\n" + "Returns the inverse with respect to the parameter `k` of\n" + "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n" + "function.\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + "    The probability of `k` or fewer failures before `n` successes (float).\n" + "n : array_like\n" + "    The target number of successes (positive int).\n" + "p : array_like\n" + "    Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + "    Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "k : scalar or ndarray\n" + "    The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Cumulative distribution function of the negative binomial.\n" + "nbdtrc : Survival function of the negative binomial.\n" + "nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.\n" + "nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.\n" + "\n" + "Formula 26.5.26 of [2]_,\n" + "\n" + ".. math::\n" + "    \\sum_{j=k + 1}^\\infty {{n + j - 1}\n" + "    \\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),\n" + "\n" + "is used to reduce calculation of the cumulative distribution function to\n" + "that of a regularized incomplete beta :math:`I`.\n" + "\n" + "Computation of `k` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `k`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + "Compute the negative binomial cumulative distribution function for an\n" + "exemplary parameter set.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtr, nbdtrik\n" + ">>> k, n, p = 5, 2, 0.5\n" + ">>> cdf_value = nbdtr(k, n, p)\n" + ">>> cdf_value\n" + "0.9375\n" + "\n" + "Verify that `nbdtrik` recovers the original value for `k`.\n" + "\n" + ">>> nbdtrik(cdf_value, n, p)\n" + "5.0\n" + "\n" + "Plot the function for different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> p_parameters = [0.2, 0.5, 0.7, 0.5]\n" + ">>> n_parameters = [30, 30, 30, 80]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(p_parameters, n_parameters, linestyles))\n" + ">>> cdf_vals = np.linspace(0, 1, 1000)\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... p, n, style = parameter_set\n" + "... nbdtrik_vals = nbdtrik(cdf_vals, n, p)\n" + "... ax.plot(cdf_vals, nbdtrik_vals, label=rf\"$n={n},\\ p={p}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_ylabel(\"$k$\")\n" + ">>> ax.set_xlabel(\"$CDF$\")\n" + ">>> ax.set_title(\"Negative binomial percentile function\")\n" + ">>> plt.show()\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. The percentile function method ``ppf``\n" + "returns the result of `nbdtrik` rounded up to integers:\n" + "\n" + ">>> from scipy.stats import nbinom\n" + ">>> q, n, p = 0.6, 5, 0.5\n" + ">>> nbinom.ppf(q, n, p), nbdtrik(q, n, p)\n" + "(5.0, 4.800428460273882)") +ufunc_nbdtrik_loops[0] = loop_d_ddd__As_fff_f +ufunc_nbdtrik_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nbdtrik_types[0] = NPY_FLOAT +ufunc_nbdtrik_types[1] = NPY_FLOAT +ufunc_nbdtrik_types[2] = NPY_FLOAT +ufunc_nbdtrik_types[3] = NPY_FLOAT +ufunc_nbdtrik_types[4] = NPY_DOUBLE +ufunc_nbdtrik_types[5] = NPY_DOUBLE +ufunc_nbdtrik_types[6] = NPY_DOUBLE +ufunc_nbdtrik_types[7] = NPY_DOUBLE +ufunc_nbdtrik_ptr[2*0] = _func_nbdtrik +ufunc_nbdtrik_ptr[2*0+1] = ("nbdtrik") +ufunc_nbdtrik_ptr[2*1] = _func_nbdtrik +ufunc_nbdtrik_ptr[2*1+1] = ("nbdtrik") +ufunc_nbdtrik_data[0] = &ufunc_nbdtrik_ptr[2*0] +ufunc_nbdtrik_data[1] = &ufunc_nbdtrik_ptr[2*1] +nbdtrik = np.PyUFunc_FromFuncAndData(ufunc_nbdtrik_loops, ufunc_nbdtrik_data, ufunc_nbdtrik_types, 2, 3, 1, 0, "nbdtrik", ufunc_nbdtrik_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtrin_loops[2] +cdef void *ufunc_nbdtrin_ptr[4] +cdef void *ufunc_nbdtrin_data[2] +cdef char ufunc_nbdtrin_types[8] +cdef char *ufunc_nbdtrin_doc = ( + "nbdtrin(k, y, p, out=None)\n" + "\n" + "Inverse of `nbdtr` vs `n`.\n" + "\n" + "Returns the inverse with respect to the parameter `n` of\n" + "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n" + "function.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "y : array_like\n" + " The probability of `k` or fewer failures before `n` successes (float).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " 
Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "n : scalar or ndarray\n" + " The number of successes `n` such that `nbdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Cumulative distribution function of the negative binomial.\n" + "nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.\n" + "nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.\n" + "\n" + "Formula 26.5.26 of [2]_,\n" + "\n" + ".. math::\n" + " \\sum_{j=k + 1}^\\infty {{n + j - 1}\n" + " \\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),\n" + "\n" + "is used to reduce calculation of the cumulative distribution function to\n" + "that of a regularized incomplete beta :math:`I`.\n" + "\n" + "Computation of `n` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `n`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + "Compute the negative binomial cumulative distribution function for an\n" + "exemplary parameter set.\n" + "\n" + ">>> from scipy.special import nbdtr, nbdtrin\n" + ">>> k, n, p = 5, 2, 0.5\n" + ">>> cdf_value = nbdtr(k, n, p)\n" + ">>> cdf_value\n" + "0.9375\n" + "\n" + "Verify that `nbdtrin` recovers the original value for `n` up to floating\n" + "point accuracy.\n" + "\n" + ">>> nbdtrin(k, cdf_value, p)\n" + "1.999999999998137") +ufunc_nbdtrin_loops[0] = loop_d_ddd__As_fff_f +ufunc_nbdtrin_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nbdtrin_types[0] = NPY_FLOAT +ufunc_nbdtrin_types[1] = NPY_FLOAT +ufunc_nbdtrin_types[2] = NPY_FLOAT +ufunc_nbdtrin_types[3] = NPY_FLOAT +ufunc_nbdtrin_types[4] = NPY_DOUBLE +ufunc_nbdtrin_types[5] = NPY_DOUBLE +ufunc_nbdtrin_types[6] = NPY_DOUBLE +ufunc_nbdtrin_types[7] = NPY_DOUBLE +ufunc_nbdtrin_ptr[2*0] = _func_nbdtrin +ufunc_nbdtrin_ptr[2*0+1] = ("nbdtrin") +ufunc_nbdtrin_ptr[2*1] = _func_nbdtrin +ufunc_nbdtrin_ptr[2*1+1] = ("nbdtrin") +ufunc_nbdtrin_data[0] = &ufunc_nbdtrin_ptr[2*0] +ufunc_nbdtrin_data[1] = &ufunc_nbdtrin_ptr[2*1] +nbdtrin = np.PyUFunc_FromFuncAndData(ufunc_nbdtrin_loops, ufunc_nbdtrin_data, ufunc_nbdtrin_types, 2, 3, 1, 0, "nbdtrin", ufunc_nbdtrin_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtr_loops[2] +cdef void *ufunc_ncfdtr_ptr[4] +cdef void *ufunc_ncfdtr_data[2] +cdef char ufunc_ncfdtr_types[10] +cdef char *ufunc_ncfdtr_doc = ( + "ncfdtr(dfn, dfd, nc, f, out=None)\n" + "\n" + "Cumulative distribution function of the non-central F distribution.\n" + "\n" + "The non-central F describes the distribution of,\n" + "\n" + ".. math::\n" + " Z = \\frac{X/d_n}{Y/d_d}\n" + "\n" + "where :math:`X` and :math:`Y` are independently distributed, with\n" + ":math:`X` distributed non-central :math:`\\chi^2` with noncentrality\n" + "parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`\n" + "distributed :math:`\\chi^2` with :math:`d_d` degrees of freedom.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. 
Range (0, inf).\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (0, 1e4).\n" + "f : array_like\n" + " Quantiles, i.e. the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cdf : scalar or ndarray\n" + " The calculated CDF. If all inputs are scalar, the return will be a\n" + " float. Otherwise it will be an array.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.\n" + "\n" + "The cumulative distribution function is computed using Formula 26.6.20 of\n" + "[2]_:\n" + "\n" + ".. math::\n" + " F(d_n, d_d, n_c, f) = \\sum_{j=0}^\\infty e^{-n_c/2}\n" + " \\frac{(n_c/2)^j}{j!} I_{x}(\\frac{d_n}{2} + j, \\frac{d_d}{2}),\n" + "\n" + "where :math:`I` is the regularized incomplete beta function, and\n" + ":math:`x = f d_n/(f d_n + d_d)`.\n" + "\n" + "The computation time required for this routine is proportional to the\n" + "noncentrality parameter `nc`. Very large values of this parameter can\n" + "consume immense computer resources. This is why the search range is\n" + "bounded by 10,000.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> from scipy import stats\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Plot the CDF of the non-central F distribution, for nc=0. 
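\n" + "\n" + "For ``nc=0`` the series in the Notes reduces to its first term, so the\n" + "distribution coincides with the central F distribution; a sketch of a\n" + "numerical check via `betainc`, using the substitution from the Notes:\n" + "\n" + ">>> dfn, dfd, f = 3., 2., 1.5\n" + ">>> x_beta = f * dfn / (f * dfn + dfd)\n" + ">>> np.allclose(special.ncfdtr(dfn, dfd, 0, f),\n" + "...             special.betainc(dfn / 2, dfd / 2, x_beta))\n" + "True\n" + "\n" + "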
Compare with the\n" + "F-distribution from scipy.stats:\n" + "\n" + ">>> x = np.linspace(-1, 8, num=500)\n" + ">>> dfn = 3\n" + ">>> dfd = 2\n" + ">>> ncf_stats = stats.f.cdf(x, dfn, dfd)\n" + ">>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)\n" + "\n" + ">>> fig = plt.figure()\n" + ">>> ax = fig.add_subplot(111)\n" + ">>> ax.plot(x, ncf_stats, 'b-', lw=3)\n" + ">>> ax.plot(x, ncf_special, 'r-')\n" + ">>> plt.show()") +ufunc_ncfdtr_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtr_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtr_types[0] = NPY_FLOAT +ufunc_ncfdtr_types[1] = NPY_FLOAT +ufunc_ncfdtr_types[2] = NPY_FLOAT +ufunc_ncfdtr_types[3] = NPY_FLOAT +ufunc_ncfdtr_types[4] = NPY_FLOAT +ufunc_ncfdtr_types[5] = NPY_DOUBLE +ufunc_ncfdtr_types[6] = NPY_DOUBLE +ufunc_ncfdtr_types[7] = NPY_DOUBLE +ufunc_ncfdtr_types[8] = NPY_DOUBLE +ufunc_ncfdtr_types[9] = NPY_DOUBLE +ufunc_ncfdtr_ptr[2*0] = _func_ncfdtr +ufunc_ncfdtr_ptr[2*0+1] = ("ncfdtr") +ufunc_ncfdtr_ptr[2*1] = _func_ncfdtr +ufunc_ncfdtr_ptr[2*1+1] = ("ncfdtr") +ufunc_ncfdtr_data[0] = &ufunc_ncfdtr_ptr[2*0] +ufunc_ncfdtr_data[1] = &ufunc_ncfdtr_ptr[2*1] +ncfdtr = np.PyUFunc_FromFuncAndData(ufunc_ncfdtr_loops, ufunc_ncfdtr_data, ufunc_ncfdtr_types, 2, 4, 1, 0, "ncfdtr", ufunc_ncfdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtri_loops[2] +cdef void *ufunc_ncfdtri_ptr[4] +cdef void *ufunc_ncfdtri_data[2] +cdef char ufunc_ncfdtri_types[10] +cdef char *ufunc_ncfdtri_doc = ( + "ncfdtri(dfn, dfd, nc, p, out=None)\n" + "\n" + "Inverse with respect to `f` of the CDF of the non-central F distribution.\n" + "\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (0, 1e4).\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "f : scalar or ndarray\n" + " Quantiles, i.e., the upper limit of integration.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtri\n" + "\n" + "Compute the CDF for several values of `f`:\n" + "\n" + ">>> f = [0.5, 1, 1.5]\n" + ">>> p = ncfdtr(2, 3, 1.5, f)\n" + ">>> p\n" + "array([ 0.20782291, 0.36107392, 0.47345752])\n" + "\n" + "Compute the inverse. We recover the values of `f`, as expected:\n" + "\n" + ">>> ncfdtri(2, 3, 1.5, p)\n" + "array([ 0.5, 1. 
, 1.5])") +ufunc_ncfdtri_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtri_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtri_types[0] = NPY_FLOAT +ufunc_ncfdtri_types[1] = NPY_FLOAT +ufunc_ncfdtri_types[2] = NPY_FLOAT +ufunc_ncfdtri_types[3] = NPY_FLOAT +ufunc_ncfdtri_types[4] = NPY_FLOAT +ufunc_ncfdtri_types[5] = NPY_DOUBLE +ufunc_ncfdtri_types[6] = NPY_DOUBLE +ufunc_ncfdtri_types[7] = NPY_DOUBLE +ufunc_ncfdtri_types[8] = NPY_DOUBLE +ufunc_ncfdtri_types[9] = NPY_DOUBLE +ufunc_ncfdtri_ptr[2*0] = _func_ncfdtri +ufunc_ncfdtri_ptr[2*0+1] = ("ncfdtri") +ufunc_ncfdtri_ptr[2*1] = _func_ncfdtri +ufunc_ncfdtri_ptr[2*1+1] = ("ncfdtri") +ufunc_ncfdtri_data[0] = &ufunc_ncfdtri_ptr[2*0] +ufunc_ncfdtri_data[1] = &ufunc_ncfdtri_ptr[2*1] +ncfdtri = np.PyUFunc_FromFuncAndData(ufunc_ncfdtri_loops, ufunc_ncfdtri_data, ufunc_ncfdtri_types, 2, 4, 1, 0, "ncfdtri", ufunc_ncfdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtridfd_loops[2] +cdef void *ufunc_ncfdtridfd_ptr[4] +cdef void *ufunc_ncfdtridfd_data[2] +cdef char ufunc_ncfdtridfd_types[10] +cdef char *ufunc_ncfdtridfd_doc = ( + "ncfdtridfd(dfn, p, nc, f, out=None)\n" + "\n" + "Calculate degrees of freedom (denominator) for the noncentral F-distribution.\n" + "\n" + "This is the inverse with respect to `dfd` of `ncfdtr`.\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (0, 1e4).\n" + "f : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "dfd : scalar or ndarray\n" + " Degrees of freedom of the denominator sum of squares.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Notes\n" + "-----\n" + "The value of the cumulative noncentral F distribution is not necessarily\n" + "monotone in either degrees of freedom. There thus may be two values that\n" + "provide a given CDF value. This routine assumes monotonicity and will\n" + "find an arbitrary one of the two values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtridfd\n" + "\n" + "Compute the CDF for several values of `dfd`:\n" + "\n" + ">>> dfd = [1, 2, 3]\n" + ">>> p = ncfdtr(2, dfd, 0.25, 15)\n" + ">>> p\n" + "array([ 0.8097138 , 0.93020416, 0.96787852])\n" + "\n" + "Compute the inverse. 
We recover the values of `dfd`, as expected:\n" + "\n" + ">>> ncfdtridfd(2, p, 0.25, 15)\n" + "array([ 1., 2., 3.])") +ufunc_ncfdtridfd_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtridfd_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtridfd_types[0] = NPY_FLOAT +ufunc_ncfdtridfd_types[1] = NPY_FLOAT +ufunc_ncfdtridfd_types[2] = NPY_FLOAT +ufunc_ncfdtridfd_types[3] = NPY_FLOAT +ufunc_ncfdtridfd_types[4] = NPY_FLOAT +ufunc_ncfdtridfd_types[5] = NPY_DOUBLE +ufunc_ncfdtridfd_types[6] = NPY_DOUBLE +ufunc_ncfdtridfd_types[7] = NPY_DOUBLE +ufunc_ncfdtridfd_types[8] = NPY_DOUBLE +ufunc_ncfdtridfd_types[9] = NPY_DOUBLE +ufunc_ncfdtridfd_ptr[2*0] = _func_ncfdtridfd +ufunc_ncfdtridfd_ptr[2*0+1] = ("ncfdtridfd") +ufunc_ncfdtridfd_ptr[2*1] = _func_ncfdtridfd +ufunc_ncfdtridfd_ptr[2*1+1] = ("ncfdtridfd") +ufunc_ncfdtridfd_data[0] = &ufunc_ncfdtridfd_ptr[2*0] +ufunc_ncfdtridfd_data[1] = &ufunc_ncfdtridfd_ptr[2*1] +ncfdtridfd = np.PyUFunc_FromFuncAndData(ufunc_ncfdtridfd_loops, ufunc_ncfdtridfd_data, ufunc_ncfdtridfd_types, 2, 4, 1, 0, "ncfdtridfd", ufunc_ncfdtridfd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtridfn_loops[2] +cdef void *ufunc_ncfdtridfn_ptr[4] +cdef void *ufunc_ncfdtridfn_data[2] +cdef char ufunc_ncfdtridfn_types[10] +cdef char *ufunc_ncfdtridfn_doc = ( + "ncfdtridfn(p, dfd, nc, f, out=None)\n" + "\n" + "Calculate degrees of freedom (numerator) for the noncentral F-distribution.\n" + "\n" + "This is the inverse with respect to `dfn` of `ncfdtr`.\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (0, 1e4).\n" + "f : float\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "dfn : scalar or ndarray\n" + " Degrees of freedom of the numerator sum of squares.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Notes\n" + "-----\n" + "The value of the cumulative noncentral F distribution is not necessarily\n" + "monotone in either degrees of freedom. There thus may be two values that\n" + "provide a given CDF value. This routine assumes monotonicity and will\n" + "find an arbitrary one of the two values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtridfn\n" + "\n" + "Compute the CDF for several values of `dfn`:\n" + "\n" + ">>> dfn = [1, 2, 3]\n" + ">>> p = ncfdtr(dfn, 2, 0.25, 15)\n" + ">>> p\n" + "array([ 0.92562363, 0.93020416, 0.93188394])\n" + "\n" + "Compute the inverse. 
We recover the values of `dfn`, as expected:\n" + "\n" + ">>> ncfdtridfn(p, 2, 0.25, 15)\n" + "array([ 1., 2., 3.])") +ufunc_ncfdtridfn_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtridfn_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtridfn_types[0] = NPY_FLOAT +ufunc_ncfdtridfn_types[1] = NPY_FLOAT +ufunc_ncfdtridfn_types[2] = NPY_FLOAT +ufunc_ncfdtridfn_types[3] = NPY_FLOAT +ufunc_ncfdtridfn_types[4] = NPY_FLOAT +ufunc_ncfdtridfn_types[5] = NPY_DOUBLE +ufunc_ncfdtridfn_types[6] = NPY_DOUBLE +ufunc_ncfdtridfn_types[7] = NPY_DOUBLE +ufunc_ncfdtridfn_types[8] = NPY_DOUBLE +ufunc_ncfdtridfn_types[9] = NPY_DOUBLE +ufunc_ncfdtridfn_ptr[2*0] = _func_ncfdtridfn +ufunc_ncfdtridfn_ptr[2*0+1] = ("ncfdtridfn") +ufunc_ncfdtridfn_ptr[2*1] = _func_ncfdtridfn +ufunc_ncfdtridfn_ptr[2*1+1] = ("ncfdtridfn") +ufunc_ncfdtridfn_data[0] = &ufunc_ncfdtridfn_ptr[2*0] +ufunc_ncfdtridfn_data[1] = &ufunc_ncfdtridfn_ptr[2*1] +ncfdtridfn = np.PyUFunc_FromFuncAndData(ufunc_ncfdtridfn_loops, ufunc_ncfdtridfn_data, ufunc_ncfdtridfn_types, 2, 4, 1, 0, "ncfdtridfn", ufunc_ncfdtridfn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtrinc_loops[2] +cdef void *ufunc_ncfdtrinc_ptr[4] +cdef void *ufunc_ncfdtrinc_data[2] +cdef char ufunc_ncfdtrinc_types[10] +cdef char *ufunc_ncfdtrinc_doc = ( + "ncfdtrinc(dfn, dfd, p, f, out=None)\n" + "\n" + "Calculate non-centrality parameter for non-central F distribution.\n" + "\n" + "This is the inverse with respect to `nc` of `ncfdtr`.\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "f : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "nc : scalar or ndarray\n" + " Noncentrality parameter.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtrinc\n" + "\n" + "Compute the CDF for several values of `nc`:\n" + "\n" + ">>> nc = [0.5, 1.5, 2.0]\n" + ">>> p = ncfdtr(2, 3, nc, 15)\n" + ">>> p\n" + "array([ 0.96309246, 0.94327955, 0.93304098])\n" + "\n" + "Compute the inverse. We recover the values of `nc`, as expected:\n" + "\n" + ">>> ncfdtrinc(2, 3, p, 15)\n" + "array([ 0.5, 1.5, 2. 
])") +ufunc_ncfdtrinc_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtrinc_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtrinc_types[0] = NPY_FLOAT +ufunc_ncfdtrinc_types[1] = NPY_FLOAT +ufunc_ncfdtrinc_types[2] = NPY_FLOAT +ufunc_ncfdtrinc_types[3] = NPY_FLOAT +ufunc_ncfdtrinc_types[4] = NPY_FLOAT +ufunc_ncfdtrinc_types[5] = NPY_DOUBLE +ufunc_ncfdtrinc_types[6] = NPY_DOUBLE +ufunc_ncfdtrinc_types[7] = NPY_DOUBLE +ufunc_ncfdtrinc_types[8] = NPY_DOUBLE +ufunc_ncfdtrinc_types[9] = NPY_DOUBLE +ufunc_ncfdtrinc_ptr[2*0] = _func_ncfdtrinc +ufunc_ncfdtrinc_ptr[2*0+1] = ("ncfdtrinc") +ufunc_ncfdtrinc_ptr[2*1] = _func_ncfdtrinc +ufunc_ncfdtrinc_ptr[2*1+1] = ("ncfdtrinc") +ufunc_ncfdtrinc_data[0] = &ufunc_ncfdtrinc_ptr[2*0] +ufunc_ncfdtrinc_data[1] = &ufunc_ncfdtrinc_ptr[2*1] +ncfdtrinc = np.PyUFunc_FromFuncAndData(ufunc_ncfdtrinc_loops, ufunc_ncfdtrinc_data, ufunc_ncfdtrinc_types, 2, 4, 1, 0, "ncfdtrinc", ufunc_ncfdtrinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtr_loops[2] +cdef void *ufunc_nctdtr_ptr[4] +cdef void *ufunc_nctdtr_data[2] +cdef char ufunc_nctdtr_types[8] +cdef char *ufunc_nctdtr_doc = ( + "nctdtr(df, nc, t, out=None)\n" + "\n" + "Cumulative distribution function of the non-central `t` distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom of the distribution. Should be in range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (-1e6, 1e6).\n" + "t : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cdf : scalar or ndarray\n" + " The calculated CDF. If all inputs are scalar, the return will be a\n" + " float. Otherwise, it will be an array.\n" + "\n" + "See Also\n" + "--------\n" + "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n" + "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n" + "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> from scipy import stats\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Plot the CDF of the non-central t distribution, for nc=0. 
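\n" + "\n" + "For ``nc=0`` the distribution reduces to the central t distribution, which\n" + "gives a quick pointwise cross-check:\n" + "\n" + ">>> np.allclose(special.nctdtr(3, 0, 1.5), stats.t.cdf(1.5, 3))\n" + "True\n" + "\n" + "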
Compare with the\n" + "t-distribution from scipy.stats:\n" + "\n" + ">>> x = np.linspace(-5, 5, num=500)\n" + ">>> df = 3\n" + ">>> nct_stats = stats.t.cdf(x, df)\n" + ">>> nct_special = special.nctdtr(df, 0, x)\n" + "\n" + ">>> fig = plt.figure()\n" + ">>> ax = fig.add_subplot(111)\n" + ">>> ax.plot(x, nct_stats, 'b-', lw=3)\n" + ">>> ax.plot(x, nct_special, 'r-')\n" + ">>> plt.show()") +ufunc_nctdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtr_types[0] = NPY_FLOAT +ufunc_nctdtr_types[1] = NPY_FLOAT +ufunc_nctdtr_types[2] = NPY_FLOAT +ufunc_nctdtr_types[3] = NPY_FLOAT +ufunc_nctdtr_types[4] = NPY_DOUBLE +ufunc_nctdtr_types[5] = NPY_DOUBLE +ufunc_nctdtr_types[6] = NPY_DOUBLE +ufunc_nctdtr_types[7] = NPY_DOUBLE +ufunc_nctdtr_ptr[2*0] = _func_nctdtr +ufunc_nctdtr_ptr[2*0+1] = ("nctdtr") +ufunc_nctdtr_ptr[2*1] = _func_nctdtr +ufunc_nctdtr_ptr[2*1+1] = ("nctdtr") +ufunc_nctdtr_data[0] = &ufunc_nctdtr_ptr[2*0] +ufunc_nctdtr_data[1] = &ufunc_nctdtr_ptr[2*1] +nctdtr = np.PyUFunc_FromFuncAndData(ufunc_nctdtr_loops, ufunc_nctdtr_data, ufunc_nctdtr_types, 2, 3, 1, 0, "nctdtr", ufunc_nctdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtridf_loops[2] +cdef void *ufunc_nctdtridf_ptr[4] +cdef void *ufunc_nctdtridf_data[2] +cdef char ufunc_nctdtridf_types[8] +cdef char *ufunc_nctdtridf_doc = ( + "nctdtridf(p, nc, t, out=None)\n" + "\n" + "Calculate degrees of freedom for non-central t distribution.\n" + "\n" + "See `nctdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (-1e6, 1e6).\n" + "t : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "df : scalar or ndarray\n" + " The degrees of freedom. If all inputs are scalar, the return will be a\n" + " float. Otherwise, it will be an array.\n" + "\n" + "See Also\n" + "--------\n" + "nctdtr : CDF of the non-central `t` distribution.\n" + "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n" + "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import nctdtr, nctdtridf\n" + "\n" + "Compute the CDF for several values of `df`:\n" + "\n" + ">>> df = [1, 2, 3]\n" + ">>> p = nctdtr(df, 0.25, 1)\n" + ">>> p\n" + "array([0.67491974, 0.716464 , 0.73349456])\n" + "\n" + "Compute the inverse. 
We recover the values of `df`, as expected:\n" + "\n" + ">>> nctdtridf(p, 0.25, 1)\n" + "array([1., 2., 3.])") +ufunc_nctdtridf_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtridf_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtridf_types[0] = NPY_FLOAT +ufunc_nctdtridf_types[1] = NPY_FLOAT +ufunc_nctdtridf_types[2] = NPY_FLOAT +ufunc_nctdtridf_types[3] = NPY_FLOAT +ufunc_nctdtridf_types[4] = NPY_DOUBLE +ufunc_nctdtridf_types[5] = NPY_DOUBLE +ufunc_nctdtridf_types[6] = NPY_DOUBLE +ufunc_nctdtridf_types[7] = NPY_DOUBLE +ufunc_nctdtridf_ptr[2*0] = _func_nctdtridf +ufunc_nctdtridf_ptr[2*0+1] = ("nctdtridf") +ufunc_nctdtridf_ptr[2*1] = _func_nctdtridf +ufunc_nctdtridf_ptr[2*1+1] = ("nctdtridf") +ufunc_nctdtridf_data[0] = &ufunc_nctdtridf_ptr[2*0] +ufunc_nctdtridf_data[1] = &ufunc_nctdtridf_ptr[2*1] +nctdtridf = np.PyUFunc_FromFuncAndData(ufunc_nctdtridf_loops, ufunc_nctdtridf_data, ufunc_nctdtridf_types, 2, 3, 1, 0, "nctdtridf", ufunc_nctdtridf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtrinc_loops[2] +cdef void *ufunc_nctdtrinc_ptr[4] +cdef void *ufunc_nctdtrinc_data[2] +cdef char ufunc_nctdtrinc_types[8] +cdef char *ufunc_nctdtrinc_doc = ( + "nctdtrinc(df, p, t, out=None)\n" + "\n" + "Calculate non-centrality parameter for non-central t distribution.\n" + "\n" + "See `nctdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom of the distribution. Should be in range (0, inf).\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "t : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "nc : scalar or ndarray\n" + " Noncentrality parameter\n" + "\n" + "See Also\n" + "--------\n" + "nctdtr : CDF of the non-central `t` distribution.\n" + "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n" + "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import nctdtr, nctdtrinc\n" + "\n" + "Compute the CDF for several values of `nc`:\n" + "\n" + ">>> nc = [0.5, 1.5, 2.5]\n" + ">>> p = nctdtr(3, nc, 1.5)\n" + ">>> p\n" + "array([0.77569497, 0.45524533, 0.1668691 ])\n" + "\n" + "Compute the inverse. 
We recover the values of `nc`, as expected:\n" + "\n" + ">>> nctdtrinc(3, p, 1.5)\n" + "array([0.5, 1.5, 2.5])") +ufunc_nctdtrinc_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtrinc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtrinc_types[0] = NPY_FLOAT +ufunc_nctdtrinc_types[1] = NPY_FLOAT +ufunc_nctdtrinc_types[2] = NPY_FLOAT +ufunc_nctdtrinc_types[3] = NPY_FLOAT +ufunc_nctdtrinc_types[4] = NPY_DOUBLE +ufunc_nctdtrinc_types[5] = NPY_DOUBLE +ufunc_nctdtrinc_types[6] = NPY_DOUBLE +ufunc_nctdtrinc_types[7] = NPY_DOUBLE +ufunc_nctdtrinc_ptr[2*0] = _func_nctdtrinc +ufunc_nctdtrinc_ptr[2*0+1] = ("nctdtrinc") +ufunc_nctdtrinc_ptr[2*1] = _func_nctdtrinc +ufunc_nctdtrinc_ptr[2*1+1] = ("nctdtrinc") +ufunc_nctdtrinc_data[0] = &ufunc_nctdtrinc_ptr[2*0] +ufunc_nctdtrinc_data[1] = &ufunc_nctdtrinc_ptr[2*1] +nctdtrinc = np.PyUFunc_FromFuncAndData(ufunc_nctdtrinc_loops, ufunc_nctdtrinc_data, ufunc_nctdtrinc_types, 2, 3, 1, 0, "nctdtrinc", ufunc_nctdtrinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtrit_loops[2] +cdef void *ufunc_nctdtrit_ptr[4] +cdef void *ufunc_nctdtrit_data[2] +cdef char ufunc_nctdtrit_types[8] +cdef char *ufunc_nctdtrit_doc = ( + "nctdtrit(df, nc, p, out=None)\n" + "\n" + "Inverse cumulative distribution function of the non-central t distribution.\n" + "\n" + "See `nctdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom of the distribution. Should be in range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (-1e6, 1e6).\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "t : scalar or ndarray\n" + " Quantiles\n" + "\n" + "See Also\n" + "--------\n" + "nctdtr : CDF of the non-central `t` distribution.\n" + "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n" + "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import nctdtr, nctdtrit\n" + "\n" + "Compute the CDF for several values of `t`:\n" + "\n" + ">>> t = [0.5, 1, 1.5]\n" + ">>> p = nctdtr(3, 1, t)\n" + ">>> p\n" + "array([0.29811049, 0.46922687, 0.6257559 ])\n" + "\n" + "Compute the inverse. We recover the values of `t`, as expected:\n" + "\n" + ">>> nctdtrit(3, 1, p)\n" + "array([0.5, 1. 
, 1.5])") +ufunc_nctdtrit_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtrit_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtrit_types[0] = NPY_FLOAT +ufunc_nctdtrit_types[1] = NPY_FLOAT +ufunc_nctdtrit_types[2] = NPY_FLOAT +ufunc_nctdtrit_types[3] = NPY_FLOAT +ufunc_nctdtrit_types[4] = NPY_DOUBLE +ufunc_nctdtrit_types[5] = NPY_DOUBLE +ufunc_nctdtrit_types[6] = NPY_DOUBLE +ufunc_nctdtrit_types[7] = NPY_DOUBLE +ufunc_nctdtrit_ptr[2*0] = _func_nctdtrit +ufunc_nctdtrit_ptr[2*0+1] = ("nctdtrit") +ufunc_nctdtrit_ptr[2*1] = _func_nctdtrit +ufunc_nctdtrit_ptr[2*1+1] = ("nctdtrit") +ufunc_nctdtrit_data[0] = &ufunc_nctdtrit_ptr[2*0] +ufunc_nctdtrit_data[1] = &ufunc_nctdtrit_ptr[2*1] +nctdtrit = np.PyUFunc_FromFuncAndData(ufunc_nctdtrit_loops, ufunc_nctdtrit_data, ufunc_nctdtrit_types, 2, 3, 1, 0, "nctdtrit", ufunc_nctdtrit_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ndtr_loops[4] +cdef void *ufunc_ndtr_ptr[8] +cdef void *ufunc_ndtr_data[4] +cdef char ufunc_ndtr_types[8] +cdef char *ufunc_ndtr_doc = ( + "ndtr(x, out=None)\n" + "\n" + "Cumulative distribution of the standard normal distribution.\n" + "\n" + "Returns the area under the standard Gaussian probability\n" + "density function, integrated from minus infinity to `x`\n" + "\n" + ".. math::\n" + "\n" + " \\frac{1}{\\sqrt{2\\pi}} \\int_{-\\infty}^x \\exp(-t^2/2) dt\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like, real or complex\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value of the normal CDF evaluated at `x`\n" + "\n" + "See Also\n" + "--------\n" + "log_ndtr : Logarithm of ndtr\n" + "ndtri : Inverse of ndtr, standard normal percentile function\n" + "erf : Error function\n" + "erfc : 1 - erf\n" + "scipy.stats.norm : Normal distribution\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate `ndtr` at one point.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import ndtr\n" + ">>> ndtr(0.5)\n" + "0.6914624612740131\n" + "\n" + "Evaluate the function at several points by providing a NumPy array\n" + "or list for `x`.\n" + "\n" + ">>> ndtr([0, 0.5, 2])\n" + "array([0.5 , 0.69146246, 0.97724987])\n" + "\n" + "Plot the function.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-5, 5, 100)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, ndtr(x))\n" + ">>> ax.set_title(r\"Standard normal cumulative distribution function $\\Phi$\")\n" + ">>> plt.show()") +ufunc_ndtr_loops[0] = loop_d_d__As_f_f +ufunc_ndtr_loops[1] = loop_d_d__As_d_d +ufunc_ndtr_loops[2] = loop_D_D__As_F_F +ufunc_ndtr_loops[3] = loop_D_D__As_D_D +ufunc_ndtr_types[0] = NPY_FLOAT +ufunc_ndtr_types[1] = NPY_FLOAT +ufunc_ndtr_types[2] = NPY_DOUBLE +ufunc_ndtr_types[3] = NPY_DOUBLE +ufunc_ndtr_types[4] = NPY_CFLOAT +ufunc_ndtr_types[5] = NPY_CFLOAT +ufunc_ndtr_types[6] = NPY_CDOUBLE +ufunc_ndtr_types[7] = NPY_CDOUBLE +ufunc_ndtr_ptr[2*0] = _func_cephes_ndtr +ufunc_ndtr_ptr[2*0+1] = ("ndtr") +ufunc_ndtr_ptr[2*1] = _func_cephes_ndtr +ufunc_ndtr_ptr[2*1+1] = ("ndtr") +ufunc_ndtr_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_ndtr +ufunc_ndtr_ptr[2*2+1] = ("ndtr") +ufunc_ndtr_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_ndtr +ufunc_ndtr_ptr[2*3+1] = ("ndtr") +ufunc_ndtr_data[0] = &ufunc_ndtr_ptr[2*0] +ufunc_ndtr_data[1] = &ufunc_ndtr_ptr[2*1] +ufunc_ndtr_data[2] = &ufunc_ndtr_ptr[2*2] +ufunc_ndtr_data[3] = &ufunc_ndtr_ptr[2*3] +ndtr = np.PyUFunc_FromFuncAndData(ufunc_ndtr_loops, ufunc_ndtr_data, 
ufunc_ndtr_types, 4, 1, 1, 0, "ndtr", ufunc_ndtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ndtri_loops[2] +cdef void *ufunc_ndtri_ptr[4] +cdef void *ufunc_ndtri_data[2] +cdef char ufunc_ndtri_types[4] +cdef char *ufunc_ndtri_doc = ( + "ndtri(y, out=None)\n" + "\n" + "Inverse of `ndtr` vs x\n" + "\n" + "Returns the argument x for which the area under the standard normal\n" + "probability density function (integrated from minus infinity to `x`)\n" + "is equal to y.\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + "    Probability\n" + "out : ndarray, optional\n" + "    Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + "    Value of x such that ``ndtr(x) == y``.\n" + "\n" + "See Also\n" + "--------\n" + "ndtr : Standard normal cumulative probability distribution\n" + "ndtri_exp : Inverse of log_ndtr\n" + "\n" + "Examples\n" + "--------\n" + "`ndtri` is the percentile function of the standard normal distribution.\n" + "This means it returns the inverse of the cumulative distribution function\n" + "`ndtr`. First, let us compute a cumulative distribution value.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import ndtri, ndtr\n" + ">>> cdf_val = ndtr(2)\n" + ">>> cdf_val\n" + "0.9772498680518208\n" + "\n" + "Verify that `ndtri` yields the original value for `x` up to floating point\n" + "errors.\n" + "\n" + ">>> ndtri(cdf_val)\n" + "2.0000000000000004\n" + "\n" + "Plot the function. For that purpose, we provide a NumPy array as argument.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(0.01, 1, 200)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, ndtri(x))\n" + ">>> ax.set_title(\"Standard normal percentile function\")\n" + ">>> plt.show()")
+ufunc_ndtri_loops[0] = loop_d_d__As_f_f +ufunc_ndtri_loops[1] = loop_d_d__As_d_d +ufunc_ndtri_types[0] = NPY_FLOAT +ufunc_ndtri_types[1] = NPY_FLOAT +ufunc_ndtri_types[2] = NPY_DOUBLE +ufunc_ndtri_types[3] = NPY_DOUBLE +ufunc_ndtri_ptr[2*0] = _func_cephes_ndtri +ufunc_ndtri_ptr[2*0+1] = ("ndtri") +ufunc_ndtri_ptr[2*1] = _func_cephes_ndtri +ufunc_ndtri_ptr[2*1+1] = ("ndtri") +ufunc_ndtri_data[0] = &ufunc_ndtri_ptr[2*0] +ufunc_ndtri_data[1] = &ufunc_ndtri_ptr[2*1] +ndtri = np.PyUFunc_FromFuncAndData(ufunc_ndtri_loops, ufunc_ndtri_data, ufunc_ndtri_types, 2, 1, 1, 0, "ndtri", ufunc_ndtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ndtri_exp_loops[2] +cdef void *ufunc_ndtri_exp_ptr[4] +cdef void *ufunc_ndtri_exp_data[2] +cdef char ufunc_ndtri_exp_types[4] +cdef char *ufunc_ndtri_exp_doc = ( + "ndtri_exp(y, out=None)\n" + "\n" + "Inverse of `log_ndtr` vs x. 
Allows for greater precision than\n" + "`ndtri` composed with `numpy.exp` for very small values of y and for\n" + "y close to 0.\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like of float\n" + " Function argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Inverse of the log CDF of the standard normal distribution, evaluated\n" + " at y.\n" + "\n" + "See Also\n" + "--------\n" + "log_ndtr : log of the standard normal cumulative distribution function\n" + "ndtr : standard normal cumulative distribution function\n" + "ndtri : standard normal percentile function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "`ndtri_exp` agrees with the naive implementation when the latter does\n" + "not suffer from underflow.\n" + "\n" + ">>> sc.ndtri_exp(-1)\n" + "-0.33747496376420244\n" + ">>> sc.ndtri(np.exp(-1))\n" + "-0.33747496376420244\n" + "\n" + "For extreme values of y, the naive approach fails\n" + "\n" + ">>> sc.ndtri(np.exp(-800))\n" + "-inf\n" + ">>> sc.ndtri(np.exp(-1e-20))\n" + "inf\n" + "\n" + "whereas `ndtri_exp` is still able to compute the result to high precision.\n" + "\n" + ">>> sc.ndtri_exp(-800)\n" + "-39.88469483825668\n" + ">>> sc.ndtri_exp(-1e-20)\n" + "9.262340089798409") +ufunc_ndtri_exp_loops[0] = loop_d_d__As_f_f +ufunc_ndtri_exp_loops[1] = loop_d_d__As_d_d +ufunc_ndtri_exp_types[0] = NPY_FLOAT +ufunc_ndtri_exp_types[1] = NPY_FLOAT +ufunc_ndtri_exp_types[2] = NPY_DOUBLE +ufunc_ndtri_exp_types[3] = NPY_DOUBLE +ufunc_ndtri_exp_ptr[2*0] = _func_ndtri_exp +ufunc_ndtri_exp_ptr[2*0+1] = ("ndtri_exp") +ufunc_ndtri_exp_ptr[2*1] = _func_ndtri_exp +ufunc_ndtri_exp_ptr[2*1+1] = ("ndtri_exp") +ufunc_ndtri_exp_data[0] = &ufunc_ndtri_exp_ptr[2*0] +ufunc_ndtri_exp_data[1] = &ufunc_ndtri_exp_ptr[2*1] +ndtri_exp = np.PyUFunc_FromFuncAndData(ufunc_ndtri_exp_loops, ufunc_ndtri_exp_data, ufunc_ndtri_exp_types, 2, 1, 1, 0, "ndtri_exp", ufunc_ndtri_exp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nrdtrimn_loops[2] +cdef void *ufunc_nrdtrimn_ptr[4] +cdef void *ufunc_nrdtrimn_data[2] +cdef char ufunc_nrdtrimn_types[8] +cdef char *ufunc_nrdtrimn_doc = ( + "nrdtrimn(p, std, x, out=None)\n" + "\n" + "Calculate mean of normal distribution given other params.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "std : array_like\n" + " Standard deviation.\n" + "x : array_like\n" + " Quantiles, i.e. the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "mn : scalar or ndarray\n" + " The mean of the normal distribution.\n" + "\n" + "See Also\n" + "--------\n" + "scipy.stats.norm : Normal distribution\n" + "ndtr : Standard normal cumulative probability distribution\n" + "ndtri : Inverse of standard normal CDF with respect to quantile\n" + "nrdtrisd : Inverse of normal distribution CDF with respect to\n" + " standard deviation\n" + "\n" + "Examples\n" + "--------\n" + "`nrdtrimn` can be used to recover the mean of a normal distribution\n" + "if we know the CDF value `p` for a given quantile `x` and the\n" + "standard deviation `std`. 
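\n" + "\n" + "Because ``p = ndtr((x - mn)/std)``, the mean has the closed form\n" + "``mn = x - std * ndtri(p)``, which can serve as a quick cross-check\n" + "(a sketch, using `ndtri` from `scipy.special`):\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import ndtri, nrdtrimn\n" + ">>> p, std, x = 0.75, 2., 6.\n" + ">>> np.allclose(nrdtrimn(p, std, x), x - std * ndtri(p))\n" + "True\n" + "\n" + "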
First, we calculate\n" + "the normal distribution CDF for an exemplary parameter set.\n" + "\n" + ">>> from scipy.stats import norm\n" + ">>> mean = 3.\n" + ">>> std = 2.\n" + ">>> x = 6.\n" + ">>> p = norm.cdf(x, loc=mean, scale=std)\n" + ">>> p\n" + "0.9331927987311419\n" + "\n" + "Verify that `nrdtrimn` returns the original value for `mean`.\n" + "\n" + ">>> from scipy.special import nrdtrimn\n" + ">>> nrdtrimn(p, std, x)\n" + "3.0000000000000004") +ufunc_nrdtrimn_loops[0] = loop_d_ddd__As_fff_f +ufunc_nrdtrimn_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nrdtrimn_types[0] = NPY_FLOAT +ufunc_nrdtrimn_types[1] = NPY_FLOAT +ufunc_nrdtrimn_types[2] = NPY_FLOAT +ufunc_nrdtrimn_types[3] = NPY_FLOAT +ufunc_nrdtrimn_types[4] = NPY_DOUBLE +ufunc_nrdtrimn_types[5] = NPY_DOUBLE +ufunc_nrdtrimn_types[6] = NPY_DOUBLE +ufunc_nrdtrimn_types[7] = NPY_DOUBLE +ufunc_nrdtrimn_ptr[2*0] = _func_nrdtrimn +ufunc_nrdtrimn_ptr[2*0+1] = ("nrdtrimn") +ufunc_nrdtrimn_ptr[2*1] = _func_nrdtrimn +ufunc_nrdtrimn_ptr[2*1+1] = ("nrdtrimn") +ufunc_nrdtrimn_data[0] = &ufunc_nrdtrimn_ptr[2*0] +ufunc_nrdtrimn_data[1] = &ufunc_nrdtrimn_ptr[2*1] +nrdtrimn = np.PyUFunc_FromFuncAndData(ufunc_nrdtrimn_loops, ufunc_nrdtrimn_data, ufunc_nrdtrimn_types, 2, 3, 1, 0, "nrdtrimn", ufunc_nrdtrimn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nrdtrisd_loops[2] +cdef void *ufunc_nrdtrisd_ptr[4] +cdef void *ufunc_nrdtrisd_data[2] +cdef char ufunc_nrdtrisd_types[8] +cdef char *ufunc_nrdtrisd_doc = ( + "nrdtrisd(mn, p, x, out=None)\n" + "\n" + "Calculate standard deviation of normal distribution given other params.\n" + "\n" + "Parameters\n" + "----------\n" + "mn : scalar or ndarray\n" + " The mean of the normal distribution.\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "x : array_like\n" + " Quantiles, i.e. the upper limit of integration.\n" + "\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "std : scalar or ndarray\n" + " Standard deviation.\n" + "\n" + "See Also\n" + "--------\n" + "scipy.stats.norm : Normal distribution\n" + "ndtr : Standard normal cumulative probability distribution\n" + "ndtri : Inverse of standard normal CDF with respect to quantile\n" + "nrdtrimn : Inverse of normal distribution CDF with respect to\n" + " mean\n" + "\n" + "Examples\n" + "--------\n" + "`nrdtrisd` can be used to recover the standard deviation of a normal\n" + "distribution if we know the CDF value `p` for a given quantile `x` and\n" + "the mean `mn`. 
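\n" + "\n" + "Analogously, the standard deviation has the closed form\n" + "``std = (x - mn) / ndtri(p)`` for ``p != 0.5``, which can serve as a\n" + "quick cross-check (a sketch, using `ndtri` from `scipy.special`):\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import ndtri, nrdtrisd\n" + ">>> mn, p, x = 3., 0.75, 6.\n" + ">>> np.allclose(nrdtrisd(mn, p, x), (x - mn) / ndtri(p))\n" + "True\n" + "\n" + "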
First, we calculate the normal distribution CDF for an\n" + "exemplary parameter set.\n" + "\n" + ">>> from scipy.stats import norm\n" + ">>> mean = 3.\n" + ">>> std = 2.\n" + ">>> x = 6.\n" + ">>> p = norm.cdf(x, loc=mean, scale=std)\n" + ">>> p\n" + "0.9331927987311419\n" + "\n" + "Verify that `nrdtrisd` returns the original value for `std`.\n" + "\n" + ">>> from scipy.special import nrdtrisd\n" + ">>> nrdtrisd(mean, p, x)\n" + "2.0000000000000004") +ufunc_nrdtrisd_loops[0] = loop_d_ddd__As_fff_f +ufunc_nrdtrisd_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nrdtrisd_types[0] = NPY_FLOAT +ufunc_nrdtrisd_types[1] = NPY_FLOAT +ufunc_nrdtrisd_types[2] = NPY_FLOAT +ufunc_nrdtrisd_types[3] = NPY_FLOAT +ufunc_nrdtrisd_types[4] = NPY_DOUBLE +ufunc_nrdtrisd_types[5] = NPY_DOUBLE +ufunc_nrdtrisd_types[6] = NPY_DOUBLE +ufunc_nrdtrisd_types[7] = NPY_DOUBLE +ufunc_nrdtrisd_ptr[2*0] = _func_nrdtrisd +ufunc_nrdtrisd_ptr[2*0+1] = ("nrdtrisd") +ufunc_nrdtrisd_ptr[2*1] = _func_nrdtrisd +ufunc_nrdtrisd_ptr[2*1+1] = ("nrdtrisd") +ufunc_nrdtrisd_data[0] = &ufunc_nrdtrisd_ptr[2*0] +ufunc_nrdtrisd_data[1] = &ufunc_nrdtrisd_ptr[2*1] +nrdtrisd = np.PyUFunc_FromFuncAndData(ufunc_nrdtrisd_loops, ufunc_nrdtrisd_data, ufunc_nrdtrisd_types, 2, 3, 1, 0, "nrdtrisd", ufunc_nrdtrisd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_owens_t_loops[2] +cdef void *ufunc_owens_t_ptr[4] +cdef void *ufunc_owens_t_data[2] +cdef char ufunc_owens_t_types[6] +cdef char *ufunc_owens_t_doc = ( + "owens_t(h, a, out=None)\n" + "\n" + "Owen's T Function.\n" + "\n" + "The function T(h, a) gives the probability of the event\n" + "(X > h and 0 < Y < a * X) where X and Y are independent\n" + "standard normal random variables.\n" + "\n" + "Parameters\n" + "----------\n" + "h: array_like\n" + " Input value.\n" + "a: array_like\n" + " Input value.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "t: scalar or ndarray\n" + " Probability of the event (X > h and 0 < Y < a * X),\n" + " where X and Y are independent standard normal random variables.\n" + "\n" + "References\n" + "----------\n" + ".. [1] M. Patefield and D. Tandy, \"Fast and accurate calculation of\n" + " Owen's T Function\", Statistical Software vol. 5, pp. 
1-25, 2000.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy import special\n" + ">>> a = 3.5\n" + ">>> h = 0.78\n" + ">>> special.owens_t(h, a)\n" + "0.10877216734852274") +ufunc_owens_t_loops[0] = loop_d_dd__As_ff_f +ufunc_owens_t_loops[1] = loop_d_dd__As_dd_d +ufunc_owens_t_types[0] = NPY_FLOAT +ufunc_owens_t_types[1] = NPY_FLOAT +ufunc_owens_t_types[2] = NPY_FLOAT +ufunc_owens_t_types[3] = NPY_DOUBLE +ufunc_owens_t_types[4] = NPY_DOUBLE +ufunc_owens_t_types[5] = NPY_DOUBLE +ufunc_owens_t_ptr[2*0] = _func_cephes_owens_t +ufunc_owens_t_ptr[2*0+1] = ("owens_t") +ufunc_owens_t_ptr[2*1] = _func_cephes_owens_t +ufunc_owens_t_ptr[2*1+1] = ("owens_t") +ufunc_owens_t_data[0] = &ufunc_owens_t_ptr[2*0] +ufunc_owens_t_data[1] = &ufunc_owens_t_ptr[2*1] +owens_t = np.PyUFunc_FromFuncAndData(ufunc_owens_t_loops, ufunc_owens_t_data, ufunc_owens_t_types, 2, 2, 1, 0, "owens_t", ufunc_owens_t_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtr_loops[2] +cdef void *ufunc_pdtr_ptr[4] +cdef void *ufunc_pdtr_data[2] +cdef char ufunc_pdtr_types[6] +cdef char *ufunc_pdtr_doc = ( + "pdtr(k, m, out=None)\n" + "\n" + "Poisson cumulative distribution function.\n" + "\n" + "Defined as the probability that a Poisson-distributed random\n" + "variable with event rate :math:`m` is less than or equal to\n" + ":math:`k`. More concretely, this works out to be [1]_\n" + "\n" + ".. math::\n" + "\n" + " \\exp(-m) \\sum_{j = 0}^{\\lfloor{k}\\rfloor} \\frac{m^j}{j!}.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of occurrences (nonnegative, real)\n" + "m : array_like\n" + " Shape parameter (nonnegative, real)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Poisson cumulative distribution function\n" + "\n" + "See Also\n" + "--------\n" + "pdtrc : Poisson survival function\n" + "pdtrik : inverse of `pdtr` with respect to `k`\n" + "pdtri : inverse of `pdtr` with respect to `m`\n" + "\n" + "References\n" + "----------\n" + ".. [1] https://en.wikipedia.org/wiki/Poisson_distribution\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is a cumulative distribution function, so it converges to 1\n" + "monotonically as `k` goes to infinity.\n" + "\n" + ">>> sc.pdtr([1, 10, 100, np.inf], 1)\n" + "array([0.73575888, 0.99999999, 1. , 1. 
])\n" + "\n" + "It is discontinuous at integers and constant between integers.\n" + "\n" + ">>> sc.pdtr([1, 1.5, 1.9, 2], 1)\n" + "array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])") +ufunc_pdtr_loops[0] = loop_d_dd__As_ff_f +ufunc_pdtr_loops[1] = loop_d_dd__As_dd_d +ufunc_pdtr_types[0] = NPY_FLOAT +ufunc_pdtr_types[1] = NPY_FLOAT +ufunc_pdtr_types[2] = NPY_FLOAT +ufunc_pdtr_types[3] = NPY_DOUBLE +ufunc_pdtr_types[4] = NPY_DOUBLE +ufunc_pdtr_types[5] = NPY_DOUBLE +ufunc_pdtr_ptr[2*0] = _func_cephes_pdtr +ufunc_pdtr_ptr[2*0+1] = ("pdtr") +ufunc_pdtr_ptr[2*1] = _func_cephes_pdtr +ufunc_pdtr_ptr[2*1+1] = ("pdtr") +ufunc_pdtr_data[0] = &ufunc_pdtr_ptr[2*0] +ufunc_pdtr_data[1] = &ufunc_pdtr_ptr[2*1] +pdtr = np.PyUFunc_FromFuncAndData(ufunc_pdtr_loops, ufunc_pdtr_data, ufunc_pdtr_types, 2, 2, 1, 0, "pdtr", ufunc_pdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtrc_loops[2] +cdef void *ufunc_pdtrc_ptr[4] +cdef void *ufunc_pdtrc_data[2] +cdef char ufunc_pdtrc_types[6] +cdef char *ufunc_pdtrc_doc = ( + "pdtrc(k, m, out=None)\n" + "\n" + "Poisson survival function\n" + "\n" + "Returns the sum of the terms from k+1 to infinity of the Poisson\n" + "distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(\n" + "k+1, m). Arguments must both be non-negative doubles.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of occurrences (nonnegative, real)\n" + "m : array_like\n" + " Shape parameter (nonnegative, real)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Poisson survival function\n" + "\n" + "See Also\n" + "--------\n" + "pdtr : Poisson cumulative distribution function\n" + "pdtrik : inverse of `pdtr` with respect to `k`\n" + "pdtri : inverse of `pdtr` with respect to `m`\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is a survival function, so it decreases to 0\n" + "monotonically as `k` goes to infinity.\n" + "\n" + ">>> k = np.array([1, 10, 100, np.inf])\n" + ">>> sc.pdtrc(k, 1)\n" + "array([2.64241118e-001, 1.00477664e-008, 3.94147589e-161, 0.00000000e+000])\n" + "\n" + "It can be expressed in terms of the lower incomplete gamma\n" + "function `gammainc`.\n" + "\n" + ">>> sc.gammainc(k + 1, 1)\n" + "array([2.64241118e-001, 1.00477664e-008, 3.94147589e-161, 0.00000000e+000])") +ufunc_pdtrc_loops[0] = loop_d_dd__As_ff_f +ufunc_pdtrc_loops[1] = loop_d_dd__As_dd_d +ufunc_pdtrc_types[0] = NPY_FLOAT +ufunc_pdtrc_types[1] = NPY_FLOAT +ufunc_pdtrc_types[2] = NPY_FLOAT +ufunc_pdtrc_types[3] = NPY_DOUBLE +ufunc_pdtrc_types[4] = NPY_DOUBLE +ufunc_pdtrc_types[5] = NPY_DOUBLE +ufunc_pdtrc_ptr[2*0] = _func_cephes_pdtrc +ufunc_pdtrc_ptr[2*0+1] = ("pdtrc") +ufunc_pdtrc_ptr[2*1] = _func_cephes_pdtrc +ufunc_pdtrc_ptr[2*1+1] = ("pdtrc") +ufunc_pdtrc_data[0] = &ufunc_pdtrc_ptr[2*0] +ufunc_pdtrc_data[1] = &ufunc_pdtrc_ptr[2*1] +pdtrc = np.PyUFunc_FromFuncAndData(ufunc_pdtrc_loops, ufunc_pdtrc_data, ufunc_pdtrc_types, 2, 2, 1, 0, "pdtrc", ufunc_pdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtri_loops[3] +cdef void *ufunc_pdtri_ptr[6] +cdef void *ufunc_pdtri_data[3] +cdef char ufunc_pdtri_types[9] +cdef char *ufunc_pdtri_doc = ( + "pdtri(k, y, out=None)\n" + "\n" + "Inverse to `pdtr` vs m\n" + "\n" + "Returns the Poisson variable `m` such that the sum from 0 to `k` of\n" + "the Poisson density is equal to the given probability `y`:\n" + "calculated by ``gammaincinv(k + 1, y)``. 
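(Strictly this is the complemented inverse, ``gammainccinv(k + 1, y)``,\n" + "since `pdtr` is the upper regularized incomplete gamma function.) A\n" + "round-trip sketch, with an arbitrary, illustrative rate:\n" + "\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + ">>> y = sc.pdtr(2, 1.5) # k=2 and m=1.5 are illustrative\n" + ">>> bool(np.isclose(sc.pdtri(2, y), 1.5))\n" + "True\n" + "\n" + "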
`k` must be a nonnegative\n" + "integer and `y` between 0 and 1.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of occurrences (nonnegative, real)\n" + "y : array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the shape parameter `m` such that ``pdtr(k, m) = p``\n" + "\n" + "See Also\n" + "--------\n" + "pdtr : Poisson cumulative distribution function\n" + "pdtrc : Poisson survival function\n" + "pdtrik : inverse of `pdtr` with respect to `k`\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "Compute the CDF for several values of `m`:\n" + "\n" + ">>> m = [0.5, 1, 1.5]\n" + ">>> p = sc.pdtr(1, m)\n" + ">>> p\n" + "array([0.90979599, 0.73575888, 0.5578254 ])\n" + "\n" + "Compute the inverse. We recover the values of `m`, as expected:\n" + "\n" + ">>> sc.pdtri(1, p)\n" + "array([0.5, 1. , 1.5])") +ufunc_pdtri_loops[0] = loop_d_pd__As_pd_d +ufunc_pdtri_loops[1] = loop_d_dd__As_ff_f +ufunc_pdtri_loops[2] = loop_d_dd__As_dd_d +ufunc_pdtri_types[0] = NPY_INTP +ufunc_pdtri_types[1] = NPY_DOUBLE +ufunc_pdtri_types[2] = NPY_DOUBLE +ufunc_pdtri_types[3] = NPY_FLOAT +ufunc_pdtri_types[4] = NPY_FLOAT +ufunc_pdtri_types[5] = NPY_FLOAT +ufunc_pdtri_types[6] = NPY_DOUBLE +ufunc_pdtri_types[7] = NPY_DOUBLE +ufunc_pdtri_types[8] = NPY_DOUBLE +ufunc_pdtri_ptr[2*0] = _func_cephes_pdtri_wrap +ufunc_pdtri_ptr[2*0+1] = ("pdtri") +ufunc_pdtri_ptr[2*1] = _func_pdtri_unsafe +ufunc_pdtri_ptr[2*1+1] = ("pdtri") +ufunc_pdtri_ptr[2*2] = _func_pdtri_unsafe +ufunc_pdtri_ptr[2*2+1] = ("pdtri") +ufunc_pdtri_data[0] = &ufunc_pdtri_ptr[2*0] +ufunc_pdtri_data[1] = &ufunc_pdtri_ptr[2*1] +ufunc_pdtri_data[2] = &ufunc_pdtri_ptr[2*2] +pdtri = np.PyUFunc_FromFuncAndData(ufunc_pdtri_loops, ufunc_pdtri_data, ufunc_pdtri_types, 3, 2, 1, 0, "pdtri", ufunc_pdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtrik_loops[2] +cdef void *ufunc_pdtrik_ptr[4] +cdef void *ufunc_pdtrik_data[2] +cdef char ufunc_pdtrik_types[6] +cdef char *ufunc_pdtrik_doc = ( + "pdtrik(p, m, out=None)\n" + "\n" + "Inverse to `pdtr` vs `k`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability\n" + "m : array_like\n" + " Shape parameter (nonnegative, real)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The number of occurrences `k` such that ``pdtr(k, m) = p``\n" + "\n" + "See Also\n" + "--------\n" + "pdtr : Poisson cumulative distribution function\n" + "pdtrc : Poisson survival function\n" + "pdtri : inverse of `pdtr` with respect to `m`\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "Compute the CDF for several values of `k`:\n" + "\n" + ">>> k = [1, 2, 3]\n" + ">>> p = sc.pdtr(k, 2)\n" + ">>> p\n" + "array([0.40600585, 0.67667642, 0.85712346])\n" + "\n" + "Compute the inverse. 
We recover the values of `k`, as expected:\n" + "\n" + ">>> sc.pdtrik(p, 2)\n" + "array([1., 2., 3.])") +ufunc_pdtrik_loops[0] = loop_d_dd__As_ff_f +ufunc_pdtrik_loops[1] = loop_d_dd__As_dd_d +ufunc_pdtrik_types[0] = NPY_FLOAT +ufunc_pdtrik_types[1] = NPY_FLOAT +ufunc_pdtrik_types[2] = NPY_FLOAT +ufunc_pdtrik_types[3] = NPY_DOUBLE +ufunc_pdtrik_types[4] = NPY_DOUBLE +ufunc_pdtrik_types[5] = NPY_DOUBLE +ufunc_pdtrik_ptr[2*0] = _func_pdtrik +ufunc_pdtrik_ptr[2*0+1] = ("pdtrik") +ufunc_pdtrik_ptr[2*1] = _func_pdtrik +ufunc_pdtrik_ptr[2*1+1] = ("pdtrik") +ufunc_pdtrik_data[0] = &ufunc_pdtrik_ptr[2*0] +ufunc_pdtrik_data[1] = &ufunc_pdtrik_ptr[2*1] +pdtrik = np.PyUFunc_FromFuncAndData(ufunc_pdtrik_loops, ufunc_pdtrik_data, ufunc_pdtrik_types, 2, 2, 1, 0, "pdtrik", ufunc_pdtrik_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_poch_loops[2] +cdef void *ufunc_poch_ptr[4] +cdef void *ufunc_poch_data[2] +cdef char ufunc_poch_types[6] +cdef char *ufunc_poch_doc = ( + "poch(z, m, out=None)\n" + "\n" + "Pochhammer symbol.\n" + "\n" + "The Pochhammer symbol (rising factorial) is defined as\n" + "\n" + ".. math::\n" + "\n" + " (z)_m = \\frac{\\Gamma(z + m)}{\\Gamma(z)}\n" + "\n" + "For positive integer `m` it reads\n" + "\n" + ".. math::\n" + "\n" + " (z)_m = z (z + 1) ... (z + m - 1)\n" + "\n" + "See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "z, m : array_like\n" + " Real-valued arguments.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value of the function.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] Nist, Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5.2#iii\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is 1 when m is 0.\n" + "\n" + ">>> sc.poch([1, 2, 3, 4], 0)\n" + "array([1., 1., 1., 1.])\n" + "\n" + "For z equal to 1 it reduces to the factorial function.\n" + "\n" + ">>> sc.poch(1, 5)\n" + "120.0\n" + ">>> 1 * 2 * 3 * 4 * 5\n" + "120\n" + "\n" + "It can be expressed in terms of the gamma function.\n" + "\n" + ">>> z, m = 3.7, 2.1\n" + ">>> sc.poch(z, m)\n" + "20.529581933776953\n" + ">>> sc.gamma(z + m) / sc.gamma(z)\n" + "20.52958193377696") +ufunc_poch_loops[0] = loop_d_dd__As_ff_f +ufunc_poch_loops[1] = loop_d_dd__As_dd_d +ufunc_poch_types[0] = NPY_FLOAT +ufunc_poch_types[1] = NPY_FLOAT +ufunc_poch_types[2] = NPY_FLOAT +ufunc_poch_types[3] = NPY_DOUBLE +ufunc_poch_types[4] = NPY_DOUBLE +ufunc_poch_types[5] = NPY_DOUBLE +ufunc_poch_ptr[2*0] = _func_cephes_poch +ufunc_poch_ptr[2*0+1] = ("poch") +ufunc_poch_ptr[2*1] = _func_cephes_poch +ufunc_poch_ptr[2*1+1] = ("poch") +ufunc_poch_data[0] = &ufunc_poch_ptr[2*0] +ufunc_poch_data[1] = &ufunc_poch_ptr[2*1] +poch = np.PyUFunc_FromFuncAndData(ufunc_poch_loops, ufunc_poch_data, ufunc_poch_types, 2, 2, 1, 0, "poch", ufunc_poch_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_powm1_loops[2] +cdef void *ufunc_powm1_ptr[4] +cdef void *ufunc_powm1_data[2] +cdef char ufunc_powm1_types[6] +cdef char *ufunc_powm1_doc = ( + "powm1(x, y, out=None)\n" + "\n" + "Computes ``x**y - 1``.\n" + "\n" + "This function is useful when `y` is near 0, or when `x` is near 1.\n" + "\n" + "The function is implemented for real types only (unlike ``numpy.power``,\n" + "which accepts complex inputs).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " The base. Must be a real type (i.e. 
integer or float, not complex).\n" + "y : array_like\n" + " The exponent. Must be a real type (i.e. integer or float, not complex).\n" + "\n" + "Returns\n" + "-------\n" + "array_like\n" + " Result of the calculation\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 1.10.0\n" + "\n" + "The underlying code is implemented for single precision and double\n" + "precision floats only. Unlike `numpy.power`, integer inputs to\n" + "`powm1` are converted to floating point, and complex inputs are\n" + "not accepted.\n" + "\n" + "Note the following edge cases:\n" + "\n" + "* ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf``\n" + " and ``nan``.\n" + "* ``powm1(1, y)`` returns 0 for any ``y``, including ``nan``\n" + " and ``inf``.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import powm1\n" + "\n" + ">>> x = np.array([1.2, 10.0, 0.9999999975])\n" + ">>> y = np.array([1e-9, 1e-11, 0.1875])\n" + ">>> powm1(x, y)\n" + "array([ 1.82321557e-10, 2.30258509e-11, -4.68749998e-10])\n" + "\n" + "It can be verified that the relative errors in those results\n" + "are less than 2.5e-16.\n" + "\n" + "Compare that to the result of ``x**y - 1``, where the\n" + "relative errors are all larger than 8e-8:\n" + "\n" + ">>> x**y - 1\n" + "array([ 1.82321491e-10, 2.30258035e-11, -4.68750039e-10])") +ufunc_powm1_loops[0] = loop_f_ff__As_ff_f +ufunc_powm1_loops[1] = loop_d_dd__As_dd_d +ufunc_powm1_types[0] = NPY_FLOAT +ufunc_powm1_types[1] = NPY_FLOAT +ufunc_powm1_types[2] = NPY_FLOAT +ufunc_powm1_types[3] = NPY_DOUBLE +ufunc_powm1_types[4] = NPY_DOUBLE +ufunc_powm1_types[5] = NPY_DOUBLE +ufunc_powm1_ptr[2*0] = scipy.special._ufuncs_cxx._export_powm1_float +ufunc_powm1_ptr[2*0+1] = ("powm1") +ufunc_powm1_ptr[2*1] = scipy.special._ufuncs_cxx._export_powm1_double +ufunc_powm1_ptr[2*1+1] = ("powm1") +ufunc_powm1_data[0] = &ufunc_powm1_ptr[2*0] +ufunc_powm1_data[1] = &ufunc_powm1_ptr[2*1] +powm1 = np.PyUFunc_FromFuncAndData(ufunc_powm1_loops, ufunc_powm1_data, ufunc_powm1_types, 2, 2, 1, 0, "powm1", ufunc_powm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pseudo_huber_loops[2] +cdef void *ufunc_pseudo_huber_ptr[4] +cdef void *ufunc_pseudo_huber_data[2] +cdef char ufunc_pseudo_huber_types[6] +cdef char *ufunc_pseudo_huber_doc = ( + "pseudo_huber(delta, r, out=None)\n" + "\n" + "Pseudo-Huber loss function.\n" + "\n" + ".. math:: \\mathrm{pseudo\\_huber}(\\delta, r) =\n" + " \\delta^2 \\left( \\sqrt{ 1 + \\left( \\frac{r}{\\delta} \\right)^2 } - 1 \\right)\n" + "\n" + "Parameters\n" + "----------\n" + "delta : array_like\n" + " Input array, indicating the soft quadratic vs. linear loss changepoint.\n" + "r : array_like\n" + " Input array, possibly representing residuals.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "res : scalar or ndarray\n" + " The computed Pseudo-Huber loss function values.\n" + "\n" + "See Also\n" + "--------\n" + "huber: Similar function which this function approximates\n" + "\n" + "Notes\n" + "-----\n" + "Like `huber`, `pseudo_huber` often serves as a robust loss function\n" + "in statistics or machine learning to reduce the influence of outliers.\n" + "Unlike `huber`, `pseudo_huber` is smooth.\n" + "\n" + "Typically, `r` represents residuals, the difference\n" + "between a model prediction and data. Then, for :math:`|r|\\leq\\delta`,\n" + "`pseudo_huber` resembles the squared error and for :math:`|r|>\\delta` the\n" + "absolute error. 
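Both regimes are easy to check numerically; the tolerances below are\n" + "illustrative:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import pseudo_huber\n" + ">>> # squared-error regime: ~ r**2/2 for |r| << delta\n" + ">>> bool(np.isclose(pseudo_huber(1.0, 1e-3), 0.5 * 1e-3**2, rtol=1e-4))\n" + "True\n" + ">>> # absolute-error regime: ~ delta*|r| for |r| >> delta\n" + ">>> bool(np.isclose(pseudo_huber(1.0, 1e4) / 1e4, 1.0, rtol=1e-3))\n" + "True\n" + "\n" + "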
This way, the Pseudo-Huber loss often achieves\n" + "a fast convergence in model fitting for small residuals like the squared\n" + "error loss function and still reduces the influence of outliers\n" + "(:math:`|r|>\\delta`) like the absolute error loss. As :math:`\\delta` is\n" + "the cutoff between squared and absolute error regimes, it has\n" + "to be tuned carefully for each problem. `pseudo_huber` is also\n" + "convex, making it suitable for gradient based optimization. [1]_ [2]_\n" + "\n" + ".. versionadded:: 0.15.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Hartley, Zisserman, \"Multiple View Geometry in Computer Vision\".\n" + " 2003. Cambridge University Press. p. 619\n" + ".. [2] Charbonnier et al. \"Deterministic edge-preserving regularization\n" + " in computed imaging\". 1997. IEEE Trans. Image Processing.\n" + " 6 (2): 298 - 311.\n" + "\n" + "Examples\n" + "--------\n" + "Import all necessary modules.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import pseudo_huber, huber\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Calculate the function for ``delta=1`` at ``r=2``.\n" + "\n" + ">>> pseudo_huber(1., 2.)\n" + "1.2360679774997898\n" + "\n" + "Calculate the function at ``r=2`` for different `delta` by providing\n" + "a list or NumPy array for `delta`.\n" + "\n" + ">>> pseudo_huber([1., 2., 4.], 3.)\n" + "array([2.16227766, 3.21110255, 4. ])\n" + "\n" + "Calculate the function for ``delta=1`` at several points by providing\n" + "a list or NumPy array for `r`.\n" + "\n" + ">>> pseudo_huber(2., np.array([1., 1.5, 3., 4.]))\n" + "array([0.47213595, 1. , 3.21110255, 4.94427191])\n" + "\n" + "The function can be calculated for different `delta` and `r` by\n" + "providing arrays for both with compatible shapes for broadcasting.\n" + "\n" + ">>> r = np.array([1., 2.5, 8., 10.])\n" + ">>> deltas = np.array([[1.], [5.], [9.]])\n" + ">>> print(r.shape, deltas.shape)\n" + "(4,) (3, 1)\n" + "\n" + ">>> pseudo_huber(deltas, r)\n" + "array([[ 0.41421356, 1.6925824 , 7.06225775, 9.04987562],\n" + " [ 0.49509757, 2.95084972, 22.16990566, 30.90169944],\n" + " [ 0.49846624, 3.06693762, 27.37435121, 40.08261642]])\n" + "\n" + "Plot the function for different `delta`.\n" + "\n" + ">>> x = np.linspace(-4, 4, 500)\n" + ">>> deltas = [1, 2, 3]\n" + ">>> linestyles = [\"dashed\", \"dotted\", \"dashdot\"]\n" + ">>> fig, ax = plt.subplots()\n" + ">>> combined_plot_parameters = list(zip(deltas, linestyles))\n" + ">>> for delta, style in combined_plot_parameters:\n" + "... ax.plot(x, pseudo_huber(delta, x), label=rf\"$\\delta={delta}$\",\n" + "... ls=style)\n" + ">>> ax.legend(loc=\"upper center\")\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(r\"Pseudo-Huber loss function $h_{\\delta}(x)$\")\n" + ">>> ax.set_xlim(-4, 4)\n" + ">>> ax.set_ylim(0, 8)\n" + ">>> plt.show()\n" + "\n" + "Finally, illustrate the difference between `huber` and `pseudo_huber` by\n" + "plotting them and their gradients with respect to `r`. The plot shows\n" + "that `pseudo_huber` is continuously differentiable while `huber` is not\n" + "at the points :math:`\\pm\\delta`.\n" + "\n" + ">>> def huber_grad(delta, x):\n" + "... grad = np.copy(x)\n" + "... linear_area = np.argwhere(np.abs(x) > delta)\n" + "... grad[linear_area]=delta*np.sign(x[linear_area])\n" + "... return grad\n" + ">>> def pseudo_huber_grad(delta, x):\n" + "... 
return x* (1+(x/delta)**2)**(-0.5)\n" + ">>> x=np.linspace(-3, 3, 500)\n" + ">>> delta = 1.\n" + ">>> fig, ax = plt.subplots(figsize=(7, 7))\n" + ">>> ax.plot(x, huber(delta, x), label=\"Huber\", ls=\"dashed\")\n" + ">>> ax.plot(x, huber_grad(delta, x), label=\"Huber Gradient\", ls=\"dashdot\")\n" + ">>> ax.plot(x, pseudo_huber(delta, x), label=\"Pseudo-Huber\", ls=\"dotted\")\n" + ">>> ax.plot(x, pseudo_huber_grad(delta, x), label=\"Pseudo-Huber Gradient\",\n" + "... ls=\"solid\")\n" + ">>> ax.legend(loc=\"upper center\")\n" + ">>> plt.show()") +ufunc_pseudo_huber_loops[0] = loop_d_dd__As_ff_f +ufunc_pseudo_huber_loops[1] = loop_d_dd__As_dd_d +ufunc_pseudo_huber_types[0] = NPY_FLOAT +ufunc_pseudo_huber_types[1] = NPY_FLOAT +ufunc_pseudo_huber_types[2] = NPY_FLOAT +ufunc_pseudo_huber_types[3] = NPY_DOUBLE +ufunc_pseudo_huber_types[4] = NPY_DOUBLE +ufunc_pseudo_huber_types[5] = NPY_DOUBLE +ufunc_pseudo_huber_ptr[2*0] = _func_pseudo_huber +ufunc_pseudo_huber_ptr[2*0+1] = ("pseudo_huber") +ufunc_pseudo_huber_ptr[2*1] = _func_pseudo_huber +ufunc_pseudo_huber_ptr[2*1+1] = ("pseudo_huber") +ufunc_pseudo_huber_data[0] = &ufunc_pseudo_huber_ptr[2*0] +ufunc_pseudo_huber_data[1] = &ufunc_pseudo_huber_ptr[2*1] +pseudo_huber = np.PyUFunc_FromFuncAndData(ufunc_pseudo_huber_loops, ufunc_pseudo_huber_data, ufunc_pseudo_huber_types, 2, 2, 1, 0, "pseudo_huber", ufunc_pseudo_huber_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_radian_loops[2] +cdef void *ufunc_radian_ptr[4] +cdef void *ufunc_radian_data[2] +cdef char ufunc_radian_types[8] +cdef char *ufunc_radian_doc = ( + "radian(d, m, s, out=None)\n" + "\n" + "Convert from degrees to radians.\n" + "\n" + "Returns the angle given in (d)egrees, (m)inutes, and (s)econds in\n" + "radians.\n" + "\n" + "Parameters\n" + "----------\n" + "d : array_like\n" + " Degrees, can be real-valued.\n" + "m : array_like\n" + " Minutes, can be real-valued.\n" + "s : array_like\n" + " Seconds, can be real-valued.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the inputs in radians.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "There are many ways to specify an angle.\n" + "\n" + ">>> sc.radian(90, 0, 0)\n" + "1.5707963267948966\n" + ">>> sc.radian(0, 60 * 90, 0)\n" + "1.5707963267948966\n" + ">>> sc.radian(0, 0, 60**2 * 90)\n" + "1.5707963267948966\n" + "\n" + "The inputs can be real-valued.\n" + "\n" + ">>> sc.radian(1.5, 0, 0)\n" + "0.02617993877991494\n" + ">>> sc.radian(1, 30, 0)\n" + "0.02617993877991494") +ufunc_radian_loops[0] = loop_d_ddd__As_fff_f +ufunc_radian_loops[1] = loop_d_ddd__As_ddd_d +ufunc_radian_types[0] = NPY_FLOAT +ufunc_radian_types[1] = NPY_FLOAT +ufunc_radian_types[2] = NPY_FLOAT +ufunc_radian_types[3] = NPY_FLOAT +ufunc_radian_types[4] = NPY_DOUBLE +ufunc_radian_types[5] = NPY_DOUBLE +ufunc_radian_types[6] = NPY_DOUBLE +ufunc_radian_types[7] = NPY_DOUBLE +ufunc_radian_ptr[2*0] = _func_cephes_radian +ufunc_radian_ptr[2*0+1] = ("radian") +ufunc_radian_ptr[2*1] = _func_cephes_radian +ufunc_radian_ptr[2*1+1] = ("radian") +ufunc_radian_data[0] = &ufunc_radian_ptr[2*0] +ufunc_radian_data[1] = &ufunc_radian_ptr[2*1] +radian = np.PyUFunc_FromFuncAndData(ufunc_radian_loops, ufunc_radian_data, ufunc_radian_types, 2, 3, 1, 0, "radian", ufunc_radian_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_rel_entr_loops[2] +cdef void *ufunc_rel_entr_ptr[4] +cdef void *ufunc_rel_entr_data[2] +cdef char 
ufunc_rel_entr_types[6] +cdef char *ufunc_rel_entr_doc = ( + "rel_entr(x, y, out=None)\n" + "\n" + "Elementwise function for computing relative entropy.\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{rel\\_entr}(x, y) =\n" + " \\begin{cases}\n" + " x \\log(x / y) & x > 0, y > 0 \\\\\n" + " 0 & x = 0, y \\ge 0 \\\\\n" + " \\infty & \\text{otherwise}\n" + " \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "x, y : array_like\n" + " Input arrays\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Relative entropy of the inputs\n" + "\n" + "See Also\n" + "--------\n" + "entr, kl_div, scipy.stats.entropy\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.15.0\n" + "\n" + "This function is jointly convex in x and y.\n" + "\n" + "The origin of this function is in convex programming; see\n" + "[1]_. Given two discrete probability distributions :math:`p_1,\n" + "\\ldots, p_n` and :math:`q_1, \\ldots, q_n`, the definition of relative\n" + "entropy in the context of *information theory* is\n" + "\n" + ".. math::\n" + "\n" + " \\sum_{i = 1}^n \\mathrm{rel\\_entr}(p_i, q_i).\n" + "\n" + "To compute the latter quantity, use `scipy.stats.entropy`.\n" + "\n" + "See [2]_ for details.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n" + " Cambridge University Press, 2004.\n" + " :doi:`https://doi.org/10.1017/CBO9780511804441`\n" + ".. [2] Kullback-Leibler divergence,\n" + " https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence") +ufunc_rel_entr_loops[0] = loop_d_dd__As_ff_f +ufunc_rel_entr_loops[1] = loop_d_dd__As_dd_d +ufunc_rel_entr_types[0] = NPY_FLOAT +ufunc_rel_entr_types[1] = NPY_FLOAT +ufunc_rel_entr_types[2] = NPY_FLOAT +ufunc_rel_entr_types[3] = NPY_DOUBLE +ufunc_rel_entr_types[4] = NPY_DOUBLE +ufunc_rel_entr_types[5] = NPY_DOUBLE +ufunc_rel_entr_ptr[2*0] = _func_rel_entr +ufunc_rel_entr_ptr[2*0+1] = ("rel_entr") +ufunc_rel_entr_ptr[2*1] = _func_rel_entr +ufunc_rel_entr_ptr[2*1+1] = ("rel_entr") +ufunc_rel_entr_data[0] = &ufunc_rel_entr_ptr[2*0] +ufunc_rel_entr_data[1] = &ufunc_rel_entr_ptr[2*1] +rel_entr = np.PyUFunc_FromFuncAndData(ufunc_rel_entr_loops, ufunc_rel_entr_data, ufunc_rel_entr_types, 2, 2, 1, 0, "rel_entr", ufunc_rel_entr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_round_loops[2] +cdef void *ufunc_round_ptr[4] +cdef void *ufunc_round_data[2] +cdef char ufunc_round_types[4] +cdef char *ufunc_round_doc = ( + "round(x, out=None)\n" + "\n" + "Round to the nearest integer.\n" + "\n" + "Returns the nearest integer to `x`. If `x` ends in 0.5 exactly,\n" + "the nearest even integer is chosen.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real valued input.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The nearest integers to the elements of `x`. 
The result is of\n" + " floating type, not integer type.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It rounds to even.\n" + "\n" + ">>> sc.round([0.5, 1.5])\n" + "array([0., 2.])") +ufunc_round_loops[0] = loop_d_d__As_f_f +ufunc_round_loops[1] = loop_d_d__As_d_d +ufunc_round_types[0] = NPY_FLOAT +ufunc_round_types[1] = NPY_FLOAT +ufunc_round_types[2] = NPY_DOUBLE +ufunc_round_types[3] = NPY_DOUBLE +ufunc_round_ptr[2*0] = _func_cephes_round +ufunc_round_ptr[2*0+1] = ("round") +ufunc_round_ptr[2*1] = _func_cephes_round +ufunc_round_ptr[2*1+1] = ("round") +ufunc_round_data[0] = &ufunc_round_ptr[2*0] +ufunc_round_data[1] = &ufunc_round_ptr[2*1] +round = np.PyUFunc_FromFuncAndData(ufunc_round_loops, ufunc_round_data, ufunc_round_types, 2, 1, 1, 0, "round", ufunc_round_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_shichi_loops[4] +cdef void *ufunc_shichi_ptr[8] +cdef void *ufunc_shichi_data[4] +cdef char ufunc_shichi_types[12] +cdef char *ufunc_shichi_doc = ( + "shichi(x, out=None)\n" + "\n" + "Hyperbolic sine and cosine integrals.\n" + "\n" + "The hyperbolic sine integral is\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x \\frac{\\sinh{t}}{t}dt\n" + "\n" + "and the hyperbolic cosine integral is\n" + "\n" + ".. math::\n" + "\n" + " \\gamma + \\log(x) + \\int_0^x \\frac{\\cosh{t} - 1}{t} dt\n" + "\n" + "where :math:`\\gamma` is Euler's constant and :math:`\\log` is the\n" + "principal branch of the logarithm [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex points at which to compute the hyperbolic sine\n" + " and cosine integrals.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "si : scalar or ndarray\n" + " Hyperbolic sine integral at ``x``\n" + "ci : scalar or ndarray\n" + " Hyperbolic cosine integral at ``x``\n" + "\n" + "See Also\n" + "--------\n" + "sici : Sine and cosine integrals.\n" + "exp1 : Exponential integral E1.\n" + "expi : Exponential integral Ei.\n" + "\n" + "Notes\n" + "-----\n" + "For real arguments with ``x < 0``, ``chi`` is the real part of the\n" + "hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x\n" + "+ 0j)`` differ by a factor of ``1j*pi``.\n" + "\n" + "For real arguments the function is computed by calling Cephes'\n" + "[2]_ *shichi* routine. For complex arguments the algorithm is based\n" + "on Mpmath's [3]_ *shi* and *chi* routines.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + " (See Section 5.2.)\n" + ".. [2] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. 
[3] Fredrik Johansson and others.\n" + " \"mpmath: a Python library for arbitrary-precision floating-point\n" + " arithmetic\" (Version 0.19) http://mpmath.org/\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import shichi, sici\n" + "\n" + "`shichi` accepts real or complex input:\n" + "\n" + ">>> shichi(0.5)\n" + "(0.5069967498196671, -0.05277684495649357)\n" + ">>> shichi(0.5 + 2.5j)\n" + "((0.11772029666668238+1.831091777729851j),\n" + " (0.29912435887648825+1.7395351121166562j))\n" + "\n" + "The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are\n" + "related to the sine and cosine integrals Si(z) and Ci(z) by\n" + "\n" + "* Shi(z) = -i*Si(i*z)\n" + "* Chi(z) = Ci(-i*z) + i*pi/2\n" + "\n" + ">>> z = 0.25 + 5j\n" + ">>> shi, chi = shichi(z)\n" + ">>> shi, -1j*sici(1j*z)[0] # Should be the same.\n" + "((-0.04834719325101729+1.5469354086921228j),\n" + " (-0.04834719325101729+1.5469354086921228j))\n" + ">>> chi, sici(-1j*z)[1] + 1j*np.pi/2 # Should be the same.\n" + "((-0.19568708973868087+1.556276312103824j),\n" + " (-0.19568708973868087+1.556276312103824j))\n" + "\n" + "Plot the functions evaluated on the real axis:\n" + "\n" + ">>> xp = np.geomspace(1e-8, 4.0, 250)\n" + ">>> x = np.concatenate((-xp[::-1], xp))\n" + ">>> shi, chi = shichi(x)\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, shi, label='Shi(x)')\n" + ">>> ax.plot(x, chi, '--', label='Chi(x)')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.set_title('Hyperbolic Sine and Cosine Integrals')\n" + ">>> ax.legend(shadow=True, framealpha=1, loc='lower right')\n" + ">>> ax.grid(True)\n" + ">>> plt.show()") +ufunc_shichi_loops[0] = loop_i_d_dd_As_f_ff +ufunc_shichi_loops[1] = loop_i_d_dd_As_d_dd +ufunc_shichi_loops[2] = loop_i_D_DD_As_F_FF +ufunc_shichi_loops[3] = loop_i_D_DD_As_D_DD +ufunc_shichi_types[0] = NPY_FLOAT +ufunc_shichi_types[1] = NPY_FLOAT +ufunc_shichi_types[2] = NPY_FLOAT +ufunc_shichi_types[3] = NPY_DOUBLE +ufunc_shichi_types[4] = NPY_DOUBLE +ufunc_shichi_types[5] = NPY_DOUBLE +ufunc_shichi_types[6] = NPY_CFLOAT +ufunc_shichi_types[7] = NPY_CFLOAT +ufunc_shichi_types[8] = NPY_CFLOAT +ufunc_shichi_types[9] = NPY_CDOUBLE +ufunc_shichi_types[10] = NPY_CDOUBLE +ufunc_shichi_types[11] = NPY_CDOUBLE +ufunc_shichi_ptr[2*0] = _func_cephes_shichi_wrap +ufunc_shichi_ptr[2*0+1] = ("shichi") +ufunc_shichi_ptr[2*1] = _func_cephes_shichi_wrap +ufunc_shichi_ptr[2*1+1] = ("shichi") +ufunc_shichi_ptr[2*2] = _func_cshichi +ufunc_shichi_ptr[2*2+1] = ("shichi") +ufunc_shichi_ptr[2*3] = _func_cshichi +ufunc_shichi_ptr[2*3+1] = ("shichi") +ufunc_shichi_data[0] = &ufunc_shichi_ptr[2*0] +ufunc_shichi_data[1] = &ufunc_shichi_ptr[2*1] +ufunc_shichi_data[2] = &ufunc_shichi_ptr[2*2] +ufunc_shichi_data[3] = &ufunc_shichi_ptr[2*3] +shichi = np.PyUFunc_FromFuncAndData(ufunc_shichi_loops, ufunc_shichi_data, ufunc_shichi_types, 4, 1, 2, 0, "shichi", ufunc_shichi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_sici_loops[4] +cdef void *ufunc_sici_ptr[8] +cdef void *ufunc_sici_data[4] +cdef char ufunc_sici_types[12] +cdef char *ufunc_sici_doc = ( + "sici(x, out=None)\n" + "\n" + "Sine and cosine integrals.\n" + "\n" + "The sine integral is\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x \\frac{\\sin{t}}{t}dt\n" + "\n" + "and the cosine integral is\n" + "\n" + ".. 
math::\n" + "\n" + " \\gamma + \\log(x) + \\int_0^x \\frac{\\cos{t} - 1}{t}dt\n" + "\n" + "where :math:`\\gamma` is Euler's constant and :math:`\\log` is the\n" + "principal branch of the logarithm [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex points at which to compute the sine and cosine\n" + " integrals.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "si : scalar or ndarray\n" + " Sine integral at ``x``\n" + "ci : scalar or ndarray\n" + " Cosine integral at ``x``\n" + "\n" + "See Also\n" + "--------\n" + "shichi : Hyperbolic sine and cosine integrals.\n" + "exp1 : Exponential integral E1.\n" + "expi : Exponential integral Ei.\n" + "\n" + "Notes\n" + "-----\n" + "For real arguments with ``x < 0``, ``ci`` is the real part of the\n" + "cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``\n" + "differ by a factor of ``1j*pi``.\n" + "\n" + "For real arguments the function is computed by calling Cephes'\n" + "[2]_ *sici* routine. For complex arguments the algorithm is based\n" + "on Mpmath's [3]_ *si* and *ci* routines.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + " (See Section 5.2.)\n" + ".. [2] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [3] Fredrik Johansson and others.\n" + " \"mpmath: a Python library for arbitrary-precision floating-point\n" + " arithmetic\" (Version 0.19) http://mpmath.org/\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import sici, exp1\n" + "\n" + "`sici` accepts real or complex input:\n" + "\n" + ">>> sici(2.5)\n" + "(1.7785201734438267, 0.2858711963653835)\n" + ">>> sici(2.5 + 3j)\n" + "((4.505735874563953+0.06863305018999577j),\n" + "(0.0793644206906966-2.935510262937543j))\n" + "\n" + "For z in the right half plane, the sine and cosine integrals are\n" + "related to the exponential integral E1 (implemented in SciPy as\n" + "`scipy.special.exp1`) by\n" + "\n" + "* Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2\n" + "* Ci(z) = -(E1(i*z) + E1(-i*z))/2\n" + "\n" + "See [1]_ (equations 5.2.21 and 5.2.23).\n" + "\n" + "We can verify these relations:\n" + "\n" + ">>> z = 2 - 3j\n" + ">>> sici(z)\n" + "((4.54751388956229-1.3991965806460565j),\n" + "(1.408292501520851+2.9836177420296055j))\n" + "\n" + ">>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2 # Same as sine integral\n" + "(4.54751388956229-1.3991965806460565j)\n" + "\n" + ">>> -(exp1(1j*z) + exp1(-1j*z))/2 # Same as cosine integral\n" + "(1.408292501520851+2.9836177420296055j)\n" + "\n" + "Plot the functions evaluated on the real axis; the dotted horizontal\n" + "lines are at pi/2 and -pi/2:\n" + "\n" + ">>> x = np.linspace(-16, 16, 150)\n" + ">>> si, ci = sici(x)\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, si, label='Si(x)')\n" + ">>> ax.plot(x, ci, '--', label='Ci(x)')\n" + ">>> ax.legend(shadow=True, framealpha=1, loc='upper left')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.set_title('Sine and Cosine Integrals')\n" + ">>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k')\n" + ">>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k')\n" + ">>> ax.grid(True)\n" + ">>> plt.show()") +ufunc_sici_loops[0] = loop_i_d_dd_As_f_ff +ufunc_sici_loops[1] = 
loop_i_d_dd_As_d_dd +ufunc_sici_loops[2] = loop_i_D_DD_As_F_FF +ufunc_sici_loops[3] = loop_i_D_DD_As_D_DD +ufunc_sici_types[0] = NPY_FLOAT +ufunc_sici_types[1] = NPY_FLOAT +ufunc_sici_types[2] = NPY_FLOAT +ufunc_sici_types[3] = NPY_DOUBLE +ufunc_sici_types[4] = NPY_DOUBLE +ufunc_sici_types[5] = NPY_DOUBLE +ufunc_sici_types[6] = NPY_CFLOAT +ufunc_sici_types[7] = NPY_CFLOAT +ufunc_sici_types[8] = NPY_CFLOAT +ufunc_sici_types[9] = NPY_CDOUBLE +ufunc_sici_types[10] = NPY_CDOUBLE +ufunc_sici_types[11] = NPY_CDOUBLE +ufunc_sici_ptr[2*0] = _func_cephes_sici_wrap +ufunc_sici_ptr[2*0+1] = ("sici") +ufunc_sici_ptr[2*1] = _func_cephes_sici_wrap +ufunc_sici_ptr[2*1+1] = ("sici") +ufunc_sici_ptr[2*2] = _func_csici +ufunc_sici_ptr[2*2+1] = ("sici") +ufunc_sici_ptr[2*3] = _func_csici +ufunc_sici_ptr[2*3+1] = ("sici") +ufunc_sici_data[0] = &ufunc_sici_ptr[2*0] +ufunc_sici_data[1] = &ufunc_sici_ptr[2*1] +ufunc_sici_data[2] = &ufunc_sici_ptr[2*2] +ufunc_sici_data[3] = &ufunc_sici_ptr[2*3] +sici = np.PyUFunc_FromFuncAndData(ufunc_sici_loops, ufunc_sici_data, ufunc_sici_types, 4, 1, 2, 0, "sici", ufunc_sici_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_sindg_loops[2] +cdef void *ufunc_sindg_ptr[4] +cdef void *ufunc_sindg_data[2] +cdef char ufunc_sindg_types[4] +cdef char *ufunc_sindg_doc = ( + "sindg(x, out=None)\n" + "\n" + "Sine of the angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Sine at the input.\n" + "\n" + "See Also\n" + "--------\n" + "cosdg, tandg, cotdg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using sine directly.\n" + "\n" + ">>> x = 180 * np.arange(3)\n" + ">>> sc.sindg(x)\n" + "array([ 0., -0., 0.])\n" + ">>> np.sin(x * np.pi / 180)\n" + "array([ 0.0000000e+00, 1.2246468e-16, -2.4492936e-16])") +ufunc_sindg_loops[0] = loop_d_d__As_f_f +ufunc_sindg_loops[1] = loop_d_d__As_d_d +ufunc_sindg_types[0] = NPY_FLOAT +ufunc_sindg_types[1] = NPY_FLOAT +ufunc_sindg_types[2] = NPY_DOUBLE +ufunc_sindg_types[3] = NPY_DOUBLE +ufunc_sindg_ptr[2*0] = _func_cephes_sindg +ufunc_sindg_ptr[2*0+1] = ("sindg") +ufunc_sindg_ptr[2*1] = _func_cephes_sindg +ufunc_sindg_ptr[2*1+1] = ("sindg") +ufunc_sindg_data[0] = &ufunc_sindg_ptr[2*0] +ufunc_sindg_data[1] = &ufunc_sindg_ptr[2*1] +sindg = np.PyUFunc_FromFuncAndData(ufunc_sindg_loops, ufunc_sindg_data, ufunc_sindg_types, 2, 1, 1, 0, "sindg", ufunc_sindg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_smirnov_loops[3] +cdef void *ufunc_smirnov_ptr[6] +cdef void *ufunc_smirnov_data[3] +cdef char ufunc_smirnov_types[9] +cdef char *ufunc_smirnov_doc = ( + "smirnov(n, d, out=None)\n" + "\n" + "Kolmogorov-Smirnov complementary cumulative distribution function\n" + "\n" + "Returns the exact Kolmogorov-Smirnov complementary cumulative\n" + "distribution function (aka the Survival Function) of Dn+ (or Dn-)\n" + "for a one-sided test of equality between an empirical and a\n" + "theoretical distribution. 
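Equivalently, ``smirnov(n, d)`` is the survival function of the\n" + "`scipy.stats.ksone` distribution (see the Notes below); a quick,\n" + "illustrative cross-check:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.stats import ksone\n" + ">>> from scipy.special import smirnov\n" + ">>> bool(np.isclose(smirnov(5, 0.5), ksone.sf(0.5, 5))) # n=5, d=0.5\n" + "True\n" + "\n" + "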
It is equal to the probability that the\n" + "maximum difference between a theoretical distribution and an empirical\n" + "one based on `n` samples is greater than `d`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : int\n" + " Number of samples\n" + "d : float array_like\n" + " Deviation between the Empirical CDF (ECDF) and the target CDF.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))\n" + "\n" + "See Also\n" + "--------\n" + "smirnovi : The Inverse Survival Function for the distribution\n" + "scipy.stats.ksone : Provides the functionality as a continuous distribution\n" + "kolmogorov, kolmogi : Functions for the two-sided distribution\n" + "\n" + "Notes\n" + "-----\n" + "`smirnov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this\n" + "function is exposed in `scipy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.ksone` distribution.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import smirnov\n" + ">>> from scipy.stats import norm\n" + "\n" + "Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a\n" + "sample of size 5.\n" + "\n" + ">>> smirnov(5, [0, 0.5, 1.0])\n" + "array([ 1. , 0.056, 0. ])\n" + "\n" + "Compare a sample of size 5 against N(0, 1), the standard normal\n" + "distribution with mean 0 and standard deviation 1.\n" + "\n" + "`x` is the sample.\n" + "\n" + ">>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82])\n" + "\n" + ">>> target = norm(0, 1)\n" + ">>> cdfs = target.cdf(x)\n" + ">>> cdfs\n" + "array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ])\n" + "\n" + "Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn).\n" + "\n" + ">>> n = len(x)\n" + ">>> ecdfs = np.arange(n+1, dtype=float)/n\n" + ">>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n],\n" + "... ecdfs[1:] - cdfs])\n" + ">>> with np.printoptions(precision=3):\n" + "... print(cols)\n" + "[[-1.392 0.2 0.082 0.082 0.118]\n" + " [-0.135 0.4 0.446 0.246 -0.046]\n" + " [ 0.114 0.6 0.545 0.145 0.055]\n" + " [ 0.19 0.8 0.575 -0.025 0.225]\n" + " [ 1.82 1. 0.966 0.166 0.034]]\n" + ">>> gaps = cols[:, -2:]\n" + ">>> Dnpm = np.max(gaps, axis=0)\n" + ">>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}')\n" + "Dn-=0.246306, Dn+=0.224655\n" + ">>> probs = smirnov(n, Dnpm)\n" + ">>> print(f'For a sample of size {n} drawn from N(0, 1):',\n" + "... f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}',\n" + "... f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}',\n" + "... sep='\\n')\n" + "For a sample of size 5 drawn from N(0, 1):\n" + " Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711\n" + " Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245\n" + "\n" + "Plot the empirical CDF and the standard normal CDF.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> plt.step(np.concatenate(([-2.5], x, [2.5])),\n" + "... np.concatenate((ecdfs, [1])),\n" + "... where='post', label='Empirical CDF')\n" + ">>> xx = np.linspace(-2.5, 2.5, 100)\n" + ">>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)')\n" + "\n" + "Add vertical lines marking Dn+ and Dn-.\n" + "\n" + ">>> iminus, iplus = np.argmax(gaps, axis=0)\n" + ">>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r',\n" + "... 
alpha=0.5, lw=4)\n" + ">>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m',\n" + "... alpha=0.5, lw=4)\n" + "\n" + ">>> plt.grid(True)\n" + ">>> plt.legend(framealpha=1, shadow=True)\n" + ">>> plt.show()") +ufunc_smirnov_loops[0] = loop_d_pd__As_pd_d +ufunc_smirnov_loops[1] = loop_d_dd__As_ff_f +ufunc_smirnov_loops[2] = loop_d_dd__As_dd_d +ufunc_smirnov_types[0] = NPY_INTP +ufunc_smirnov_types[1] = NPY_DOUBLE +ufunc_smirnov_types[2] = NPY_DOUBLE +ufunc_smirnov_types[3] = NPY_FLOAT +ufunc_smirnov_types[4] = NPY_FLOAT +ufunc_smirnov_types[5] = NPY_FLOAT +ufunc_smirnov_types[6] = NPY_DOUBLE +ufunc_smirnov_types[7] = NPY_DOUBLE +ufunc_smirnov_types[8] = NPY_DOUBLE +ufunc_smirnov_ptr[2*0] = _func_cephes_smirnov_wrap +ufunc_smirnov_ptr[2*0+1] = ("smirnov") +ufunc_smirnov_ptr[2*1] = _func_smirnov_unsafe +ufunc_smirnov_ptr[2*1+1] = ("smirnov") +ufunc_smirnov_ptr[2*2] = _func_smirnov_unsafe +ufunc_smirnov_ptr[2*2+1] = ("smirnov") +ufunc_smirnov_data[0] = &ufunc_smirnov_ptr[2*0] +ufunc_smirnov_data[1] = &ufunc_smirnov_ptr[2*1] +ufunc_smirnov_data[2] = &ufunc_smirnov_ptr[2*2] +smirnov = np.PyUFunc_FromFuncAndData(ufunc_smirnov_loops, ufunc_smirnov_data, ufunc_smirnov_types, 3, 2, 1, 0, "smirnov", ufunc_smirnov_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_smirnovi_loops[3] +cdef void *ufunc_smirnovi_ptr[6] +cdef void *ufunc_smirnovi_data[3] +cdef char ufunc_smirnovi_types[9] +cdef char *ufunc_smirnovi_doc = ( + "smirnovi(n, p, out=None)\n" + "\n" + "Inverse to `smirnov`\n" + "\n" + "Returns `d` such that ``smirnov(n, d) == p``, the critical value\n" + "corresponding to `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : int\n" + " Number of samples\n" + "p : float array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of smirnovi(n, p), the critical values.\n" + "\n" + "See Also\n" + "--------\n" + "smirnov : The Survival Function (SF) for the distribution\n" + "scipy.stats.ksone : Provides the functionality as a continuous distribution\n" + "kolmogorov, kolmogi : Functions for the two-sided distribution\n" + "scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n\n" + "\n" + "Notes\n" + "-----\n" + "`smirnov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. 
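The values of `smirnovi` likewise match the inverse survival function of\n" + "`scipy.stats.ksone` (an illustrative check; the arguments are arbitrary):\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.stats import ksone\n" + ">>> from scipy.special import smirnovi\n" + ">>> bool(np.isclose(smirnovi(24, 0.128), ksone.isf(0.128, 24)))\n" + "True\n" + "\n" + "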
For historical reasons this\n" + "function is exposed in `scipy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.ksone` distribution.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import smirnovi, smirnov\n" + "\n" + ">>> n = 24\n" + ">>> deviations = [0.1, 0.2, 0.3]\n" + "\n" + "Use `smirnov` to compute the complementary CDF of the Smirnov\n" + "distribution for the given number of samples and deviations.\n" + "\n" + ">>> p = smirnov(n, deviations)\n" + ">>> p\n" + "array([0.58105083, 0.12826832, 0.01032231])\n" + "\n" + "The inverse function ``smirnovi(n, p)`` returns ``deviations``.\n" + "\n" + ">>> smirnovi(n, p)\n" + "array([0.1, 0.2, 0.3])") +ufunc_smirnovi_loops[0] = loop_d_pd__As_pd_d +ufunc_smirnovi_loops[1] = loop_d_dd__As_ff_f +ufunc_smirnovi_loops[2] = loop_d_dd__As_dd_d +ufunc_smirnovi_types[0] = NPY_INTP +ufunc_smirnovi_types[1] = NPY_DOUBLE +ufunc_smirnovi_types[2] = NPY_DOUBLE +ufunc_smirnovi_types[3] = NPY_FLOAT +ufunc_smirnovi_types[4] = NPY_FLOAT +ufunc_smirnovi_types[5] = NPY_FLOAT +ufunc_smirnovi_types[6] = NPY_DOUBLE +ufunc_smirnovi_types[7] = NPY_DOUBLE +ufunc_smirnovi_types[8] = NPY_DOUBLE +ufunc_smirnovi_ptr[2*0] = _func_cephes_smirnovi_wrap +ufunc_smirnovi_ptr[2*0+1] = ("smirnovi") +ufunc_smirnovi_ptr[2*1] = _func_smirnovi_unsafe +ufunc_smirnovi_ptr[2*1+1] = ("smirnovi") +ufunc_smirnovi_ptr[2*2] = _func_smirnovi_unsafe +ufunc_smirnovi_ptr[2*2+1] = ("smirnovi") +ufunc_smirnovi_data[0] = &ufunc_smirnovi_ptr[2*0] +ufunc_smirnovi_data[1] = &ufunc_smirnovi_ptr[2*1] +ufunc_smirnovi_data[2] = &ufunc_smirnovi_ptr[2*2] +smirnovi = np.PyUFunc_FromFuncAndData(ufunc_smirnovi_loops, ufunc_smirnovi_data, ufunc_smirnovi_types, 3, 2, 1, 0, "smirnovi", ufunc_smirnovi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_spence_loops[4] +cdef void *ufunc_spence_ptr[8] +cdef void *ufunc_spence_data[4] +cdef char ufunc_spence_types[8] +cdef char *ufunc_spence_doc = ( + "spence(z, out=None)\n" + "\n" + "Spence's function, also known as the dilogarithm.\n" + "\n" + "It is defined to be\n" + "\n" + ".. math::\n" + " \\int_1^z \\frac{\\log(t)}{1 - t}dt\n" + "\n" + "for complex :math:`z`, where the contour of integration is taken\n" + "to avoid the branch cut of the logarithm. Spence's function is\n" + "analytic everywhere except the negative real axis where it has a\n" + "branch cut.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Points at which to evaluate Spence's function\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Computed values of Spence's function\n" + "\n" + "Notes\n" + "-----\n" + "There is a different convention which defines Spence's function by\n" + "the integral\n" + "\n" + ".. 
math::\n" + " -\\int_0^z \\frac{\\log(1 - t)}{t}dt;\n" + "\n" + "this is our ``spence(1 - z)``.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import spence\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "The function is defined for complex inputs:\n" + "\n" + ">>> spence([1-1j, 1.5+2j, 3j, -10-5j])\n" + "array([-0.20561676+0.91596559j, -0.86766909-1.39560134j,\n" + " -0.59422064-2.49129918j, -1.14044398+6.80075924j])\n" + "\n" + "For complex inputs on the branch cut, which is the negative real axis,\n" + "the function returns the limit for ``z`` with positive imaginary part.\n" + "For example, in the following, note the sign change of the imaginary\n" + "part of the output for ``z = -2`` and ``z = -2 - 1e-8j``:\n" + "\n" + ">>> spence([-2 + 1e-8j, -2, -2 - 1e-8j])\n" + "array([2.32018041-3.45139229j, 2.32018042-3.4513923j ,\n" + " 2.32018041+3.45139229j])\n" + "\n" + "The function returns ``nan`` for real inputs on the branch cut:\n" + "\n" + ">>> spence(-1.5)\n" + "nan\n" + "\n" + "Verify some particular values: ``spence(0) = pi**2/6``,\n" + "``spence(1) = 0`` and ``spence(2) = -pi**2/12``.\n" + "\n" + ">>> spence([0, 1, 2])\n" + "array([ 1.64493407, 0. , -0.82246703])\n" + ">>> np.pi**2/6, -np.pi**2/12\n" + "(1.6449340668482264, -0.8224670334241132)\n" + "\n" + "Verify the identity::\n" + "\n" + " spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z)\n" + "\n" + ">>> z = 3 + 4j\n" + ">>> spence(z) + spence(1 - z)\n" + "(-2.6523186143876067+1.8853470951513935j)\n" + ">>> np.pi**2/6 - np.log(z)*np.log(1 - z)\n" + "(-2.652318614387606+1.885347095151394j)\n" + "\n" + "Plot the function for positive real input.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0, 6, 400)\n" + ">>> ax.plot(x, spence(x))\n" + ">>> ax.grid()\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.set_title('spence(x)')\n" + ">>> plt.show()") +ufunc_spence_loops[0] = loop_d_d__As_f_f +ufunc_spence_loops[1] = loop_d_d__As_d_d +ufunc_spence_loops[2] = loop_D_D__As_F_F +ufunc_spence_loops[3] = loop_D_D__As_D_D +ufunc_spence_types[0] = NPY_FLOAT +ufunc_spence_types[1] = NPY_FLOAT +ufunc_spence_types[2] = NPY_DOUBLE +ufunc_spence_types[3] = NPY_DOUBLE +ufunc_spence_types[4] = NPY_CFLOAT +ufunc_spence_types[5] = NPY_CFLOAT +ufunc_spence_types[6] = NPY_CDOUBLE +ufunc_spence_types[7] = NPY_CDOUBLE +ufunc_spence_ptr[2*0] = _func_cephes_spence +ufunc_spence_ptr[2*0+1] = ("spence") +ufunc_spence_ptr[2*1] = _func_cephes_spence +ufunc_spence_ptr[2*1+1] = ("spence") +ufunc_spence_ptr[2*2] = _func_cspence +ufunc_spence_ptr[2*2+1] = ("spence") +ufunc_spence_ptr[2*3] = _func_cspence +ufunc_spence_ptr[2*3+1] = ("spence") +ufunc_spence_data[0] = &ufunc_spence_ptr[2*0] +ufunc_spence_data[1] = &ufunc_spence_ptr[2*1] +ufunc_spence_data[2] = &ufunc_spence_ptr[2*2] +ufunc_spence_data[3] = &ufunc_spence_ptr[2*3] +spence = np.PyUFunc_FromFuncAndData(ufunc_spence_loops, ufunc_spence_data, ufunc_spence_types, 4, 1, 1, 0, "spence", ufunc_spence_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_stdtr_loops[2] +cdef void *ufunc_stdtr_ptr[4] +cdef void *ufunc_stdtr_data[2] +cdef char ufunc_stdtr_types[6] +cdef char *ufunc_stdtr_doc = ( + "stdtr(df, t, out=None)\n" + "\n" + "Student t distribution cumulative distribution function\n" + "\n" + "Returns the integral:\n" + "\n" + ".. 
math::\n" + " \\frac{\\Gamma((df+1)/2)}{\\sqrt{\\pi df} \\Gamma(df/2)}\n" + " \\int_{-\\infty}^t (1+x^2/df)^{-(df+1)/2}\\, dx\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom\n" + "t : array_like\n" + " Upper bound of the integral\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the Student t CDF at t\n" + "\n" + "See Also\n" + "--------\n" + "stdtridf : inverse of stdtr with respect to `df`\n" + "stdtrit : inverse of stdtr with respect to `t`\n" + "scipy.stats.t : student t distribution\n" + "\n" + "Notes\n" + "-----\n" + "The student t distribution is also available as `scipy.stats.t`.\n" + "Calling `stdtr` directly can improve performance compared to the\n" + "``cdf`` method of `scipy.stats.t` (see last example below).\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function for ``df=3`` at ``t=1``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import stdtr\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> stdtr(3, 1)\n" + "0.8044988905221148\n" + "\n" + "Plot the function for three different degrees of freedom.\n" + "\n" + ">>> x = np.linspace(-10, 10, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> parameters = [(1, \"solid\"), (3, \"dashed\"), (10, \"dotted\")]\n" + ">>> for (df, linestyle) in parameters:\n" + "... ax.plot(x, stdtr(df, x), ls=linestyle, label=f\"$df={df}$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_title(\"Student t distribution cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The function can be computed for several degrees of freedom at the same\n" + "time by providing a NumPy array or list for `df`:\n" + "\n" + ">>> stdtr([1, 2, 3], 1)\n" + "array([0.75 , 0.78867513, 0.80449889])\n" + "\n" + "It is possible to calculate the function at several points for several\n" + "different degrees of freedom simultaneously by providing arrays for `df`\n" + "and `t` with shapes compatible for broadcasting. Compute `stdtr` at\n" + "4 points for 3 degrees of freedom resulting in an array of shape 3x4.\n" + "\n" + ">>> dfs = np.array([[1], [2], [3]])\n" + ">>> t = np.array([2, 4, 6, 8])\n" + ">>> dfs.shape, t.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> stdtr(dfs, t)\n" + "array([[0.85241638, 0.92202087, 0.94743154, 0.96041658],\n" + " [0.90824829, 0.97140452, 0.98666426, 0.99236596],\n" + " [0.93033702, 0.98599577, 0.99536364, 0.99796171]])\n" + "\n" + "The t distribution is also available as `scipy.stats.t`. Calling `stdtr`\n" + "directly can be much faster than calling the ``cdf`` method of\n" + "`scipy.stats.t`. 
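A rough way to gauge the speed difference on a given machine (timings\n" + "vary with hardware and array size, so this is only a sketch):\n" + "\n" + ">>> import numpy as np\n" + ">>> from timeit import timeit\n" + ">>> from scipy.stats import t\n" + ">>> from scipy.special import stdtr\n" + ">>> xs = np.linspace(-5, 5, 100000)\n" + ">>> t_special = timeit(lambda: stdtr(3, xs), number=50)\n" + ">>> t_stats = timeit(lambda: t(3).cdf(xs), number=50) # typically larger\n" + "\n" + "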
To get the same results, one must use the following\n" + "parametrization: ``scipy.stats.t(df).cdf(x) = stdtr(df, x)``.\n" + "\n" + ">>> from scipy.stats import t\n" + ">>> df, x = 3, 1\n" + ">>> stdtr_result = stdtr(df, x) # this can be faster than below\n" + ">>> stats_result = t(df).cdf(x)\n" + ">>> stats_result == stdtr_result # test that results are equal\n" + "True") +ufunc_stdtr_loops[0] = loop_d_dd__As_ff_f +ufunc_stdtr_loops[1] = loop_d_dd__As_dd_d +ufunc_stdtr_types[0] = NPY_FLOAT +ufunc_stdtr_types[1] = NPY_FLOAT +ufunc_stdtr_types[2] = NPY_FLOAT +ufunc_stdtr_types[3] = NPY_DOUBLE +ufunc_stdtr_types[4] = NPY_DOUBLE +ufunc_stdtr_types[5] = NPY_DOUBLE +ufunc_stdtr_ptr[2*0] = _func_stdtr +ufunc_stdtr_ptr[2*0+1] = ("stdtr") +ufunc_stdtr_ptr[2*1] = _func_stdtr +ufunc_stdtr_ptr[2*1+1] = ("stdtr") +ufunc_stdtr_data[0] = &ufunc_stdtr_ptr[2*0] +ufunc_stdtr_data[1] = &ufunc_stdtr_ptr[2*1] +stdtr = np.PyUFunc_FromFuncAndData(ufunc_stdtr_loops, ufunc_stdtr_data, ufunc_stdtr_types, 2, 2, 1, 0, "stdtr", ufunc_stdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_stdtridf_loops[2] +cdef void *ufunc_stdtridf_ptr[4] +cdef void *ufunc_stdtridf_data[2] +cdef char ufunc_stdtridf_types[6] +cdef char *ufunc_stdtridf_doc = ( + "stdtridf(p, t, out=None)\n" + "\n" + "Inverse of `stdtr` vs df\n" + "\n" + "Returns the argument df such that stdtr(df, t) is equal to `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability\n" + "t : array_like\n" + " Upper bound of the integral\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "df : scalar or ndarray\n" + " Value of `df` such that ``stdtr(df, t) == p``\n" + "\n" + "See Also\n" + "--------\n" + "stdtr : Student t CDF\n" + "stdtrit : inverse of stdtr with respect to `t`\n" + "scipy.stats.t : Student t distribution\n" + "\n" + "Examples\n" + "--------\n" + "Compute the student t cumulative distribution function for one\n" + "parameter set.\n" + "\n" + ">>> from scipy.special import stdtr, stdtridf\n" + ">>> df, x = 5, 2\n" + ">>> cdf_value = stdtr(df, x)\n" + ">>> cdf_value\n" + "0.9490302605850709\n" + "\n" + "Verify that `stdtridf` recovers the original value for `df` given\n" + "the CDF value and `x`.\n" + "\n" + ">>> stdtridf(cdf_value, x)\n" + "5.0") +ufunc_stdtridf_loops[0] = loop_d_dd__As_ff_f +ufunc_stdtridf_loops[1] = loop_d_dd__As_dd_d +ufunc_stdtridf_types[0] = NPY_FLOAT +ufunc_stdtridf_types[1] = NPY_FLOAT +ufunc_stdtridf_types[2] = NPY_FLOAT +ufunc_stdtridf_types[3] = NPY_DOUBLE +ufunc_stdtridf_types[4] = NPY_DOUBLE +ufunc_stdtridf_types[5] = NPY_DOUBLE +ufunc_stdtridf_ptr[2*0] = _func_stdtridf +ufunc_stdtridf_ptr[2*0+1] = ("stdtridf") +ufunc_stdtridf_ptr[2*1] = _func_stdtridf +ufunc_stdtridf_ptr[2*1+1] = ("stdtridf") +ufunc_stdtridf_data[0] = &ufunc_stdtridf_ptr[2*0] +ufunc_stdtridf_data[1] = &ufunc_stdtridf_ptr[2*1] +stdtridf = np.PyUFunc_FromFuncAndData(ufunc_stdtridf_loops, ufunc_stdtridf_data, ufunc_stdtridf_types, 2, 2, 1, 0, "stdtridf", ufunc_stdtridf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_stdtrit_loops[2] +cdef void *ufunc_stdtrit_ptr[4] +cdef void *ufunc_stdtrit_data[2] +cdef char ufunc_stdtrit_types[6] +cdef char *ufunc_stdtrit_doc = ( + "stdtrit(df, p, out=None)\n" + "\n" + "The `p`-th quantile of the student t distribution.\n" + "\n" + "This function is the inverse of the student t distribution cumulative\n" + "distribution function (CDF), returning `t` such that `stdtr(df, t) = p`.\n" + "\n" + "Returns the argument `t` such 
that stdtr(df, t) is equal to `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom\n" + "p : array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "t : scalar or ndarray\n" + " Value of `t` such that ``stdtr(df, t) == p``\n" + "\n" + "See Also\n" + "--------\n" + "stdtr : Student t CDF\n" + "stdtridf : inverse of stdtr with respect to `df`\n" + "scipy.stats.t : Student t distribution\n" + "\n" + "Notes\n" + "-----\n" + "The student t distribution is also available as `scipy.stats.t`. Calling\n" + "`stdtrit` directly can improve performance compared to the ``ppf``\n" + "method of `scipy.stats.t` (see last example below).\n" + "\n" + "Examples\n" + "--------\n" + "`stdtrit` represents the inverse of the student t distribution CDF which\n" + "is available as `stdtr`. Here, we calculate the CDF for ``df`` at\n" + "``x=1``. `stdtrit` then returns ``1`` up to floating point errors\n" + "given the same value for `df` and the computed CDF value.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import stdtr, stdtrit\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> df = 3\n" + ">>> x = 1\n" + ">>> cdf_value = stdtr(df, x)\n" + ">>> stdtrit(df, cdf_value)\n" + "0.9999999994418539\n" + "\n" + "Plot the function for three different degrees of freedom.\n" + "\n" + ">>> x = np.linspace(0, 1, 1000)\n" + ">>> parameters = [(1, \"solid\"), (2, \"dashed\"), (5, \"dotted\")]\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for (df, linestyle) in parameters:\n" + "... ax.plot(x, stdtrit(df, x), ls=linestyle, label=f\"$df={df}$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_ylim(-10, 10)\n" + ">>> ax.set_title(\"Student t distribution quantile function\")\n" + ">>> plt.show()\n" + "\n" + "The function can be computed for several degrees of freedom at the same\n" + "time by providing a NumPy array or list for `df`:\n" + "\n" + ">>> stdtrit([1, 2, 3], 0.7)\n" + "array([0.72654253, 0.6172134 , 0.58438973])\n" + "\n" + "It is possible to calculate the function at several points for several\n" + "different degrees of freedom simultaneously by providing arrays for `df`\n" + "and `p` with shapes compatible for broadcasting. Compute `stdtrit` at\n" + "4 points for 3 degrees of freedom resulting in an array of shape 3x4.\n" + "\n" + ">>> dfs = np.array([[1], [2], [3]])\n" + ">>> p = np.array([0.2, 0.4, 0.7, 0.8])\n" + ">>> dfs.shape, p.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> stdtrit(dfs, p)\n" + "array([[-1.37638192, -0.3249197 , 0.72654253, 1.37638192],\n" + " [-1.06066017, -0.28867513, 0.6172134 , 1.06066017],\n" + " [-0.97847231, -0.27667066, 0.58438973, 0.97847231]])\n" + "\n" + "The t distribution is also available as `scipy.stats.t`. Calling `stdtrit`\n" + "directly can be much faster than calling the ``ppf`` method of\n" + "`scipy.stats.t`. 
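# Sketch of the inverse pair formed by stdtr and stdtrit, as described above:
# applying stdtrit to the CDF values recovers t up to floating-point error.
import numpy as np
from scipy.special import stdtr, stdtrit

df = 4
t_vals = np.linspace(-3, 3, 7)
assert np.allclose(stdtrit(df, stdtr(df, t_vals)), t_vals)
print("stdtrit(df, stdtr(df, t)) recovers t on the grid")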
To get the same results, one must use the following\n" + "parametrization: ``scipy.stats.t(df).ppf(x) = stdtrit(df, x)``.\n" + "\n" + ">>> from scipy.stats import t\n" + ">>> df, x = 3, 0.5\n" + ">>> stdtrit_result = stdtrit(df, x) # this can be faster than below\n" + ">>> stats_result = t(df).ppf(x)\n" + ">>> stats_result == stdtrit_result # test that results are equal\n" + "True") +ufunc_stdtrit_loops[0] = loop_d_dd__As_ff_f +ufunc_stdtrit_loops[1] = loop_d_dd__As_dd_d +ufunc_stdtrit_types[0] = NPY_FLOAT +ufunc_stdtrit_types[1] = NPY_FLOAT +ufunc_stdtrit_types[2] = NPY_FLOAT +ufunc_stdtrit_types[3] = NPY_DOUBLE +ufunc_stdtrit_types[4] = NPY_DOUBLE +ufunc_stdtrit_types[5] = NPY_DOUBLE +ufunc_stdtrit_ptr[2*0] = _func_stdtrit +ufunc_stdtrit_ptr[2*0+1] = ("stdtrit") +ufunc_stdtrit_ptr[2*1] = _func_stdtrit +ufunc_stdtrit_ptr[2*1+1] = ("stdtrit") +ufunc_stdtrit_data[0] = &ufunc_stdtrit_ptr[2*0] +ufunc_stdtrit_data[1] = &ufunc_stdtrit_ptr[2*1] +stdtrit = np.PyUFunc_FromFuncAndData(ufunc_stdtrit_loops, ufunc_stdtrit_data, ufunc_stdtrit_types, 2, 2, 1, 0, "stdtrit", ufunc_stdtrit_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_struve_loops[2] +cdef void *ufunc_struve_ptr[4] +cdef void *ufunc_struve_data[2] +cdef char ufunc_struve_types[6] +cdef char *ufunc_struve_doc = ( + "struve(v, x, out=None)\n" + "\n" + "Struve function.\n" + "\n" + "Return the value of the Struve function of order `v` at `x`. The Struve\n" + "function is defined as,\n" + "\n" + ".. math::\n" + " H_v(x) = (z/2)^{v + 1} \\sum_{n=0}^\\infty\n" + " \\frac{(-1)^n (z/2)^{2n}}{\\Gamma(n + \\frac{3}{2}) \\Gamma(n + v + \\frac{3}{2})},\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order of the Struve function (float).\n" + "x : array_like\n" + " Argument of the Struve function (float; must be positive unless `v` is\n" + " an integer).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "H : scalar or ndarray\n" + " Value of the Struve function of order `v` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "modstruve: Modified Struve function\n" + "\n" + "Notes\n" + "-----\n" + "Three methods discussed in [1]_ are used to evaluate the Struve function:\n" + "\n" + "- power series\n" + "- expansion in Bessel functions (if :math:`|z| < |v| + 20`)\n" + "- asymptotic large-z expansion (if :math:`z \\geq 0.7v + 12`)\n" + "\n" + "Rounding errors are estimated based on the largest terms in the sums, and\n" + "the result associated with the smallest error is returned.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/11\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the Struve function of order 1 at 2.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import struve\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> struve(1, 2.)\n" + "0.6467637282835622\n" + "\n" + "Calculate the Struve function at 2 for orders 1, 2 and 3 by providing\n" + "a list for the order parameter `v`.\n" + "\n" + ">>> struve([1, 2, 3], 2.)\n" + "array([0.64676373, 0.28031806, 0.08363767])\n" + "\n" + "Calculate the Struve function of order 1 for several points by providing\n" + "an array for `x`.\n" + "\n" + ">>> points = np.array([2., 5., 8.])\n" + ">>> struve(1, points)\n" + "array([0.64676373, 0.80781195, 0.48811605])\n" + "\n" + "Compute the Struve function for several orders at several points by\n" + "providing arrays for `v` and `z`. The arrays have to be broadcastable\n" + "to the correct shapes.\n" + "\n" + ">>> orders = np.array([[1], [2], [3]])\n" + ">>> points.shape, orders.shape\n" + "((3,), (3, 1))\n" + "\n" + ">>> struve(orders, points)\n" + "array([[0.64676373, 0.80781195, 0.48811605],\n" + " [0.28031806, 1.56937455, 1.51769363],\n" + " [0.08363767, 1.50872065, 2.98697513]])\n" + "\n" + "Plot the Struve functions of order 0 to 3 from -10 to 10.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> for i in range(4):\n" + "... ax.plot(x, struve(i, x), label=f'$H_{i!r}$')\n" + ">>> ax.legend(ncol=2)\n" + ">>> ax.set_xlim(-10, 10)\n" + ">>> ax.set_title(r\"Struve functions $H_{\\nu}$\")\n" + ">>> plt.show()") +ufunc_struve_loops[0] = loop_d_dd__As_ff_f +ufunc_struve_loops[1] = loop_d_dd__As_dd_d +ufunc_struve_types[0] = NPY_FLOAT +ufunc_struve_types[1] = NPY_FLOAT +ufunc_struve_types[2] = NPY_FLOAT +ufunc_struve_types[3] = NPY_DOUBLE +ufunc_struve_types[4] = NPY_DOUBLE +ufunc_struve_types[5] = NPY_DOUBLE +ufunc_struve_ptr[2*0] = _func_cephes_struve_h +ufunc_struve_ptr[2*0+1] = ("struve") +ufunc_struve_ptr[2*1] = _func_cephes_struve_h +ufunc_struve_ptr[2*1+1] = ("struve") +ufunc_struve_data[0] = &ufunc_struve_ptr[2*0] +ufunc_struve_data[1] = &ufunc_struve_ptr[2*1] +struve = np.PyUFunc_FromFuncAndData(ufunc_struve_loops, ufunc_struve_data, ufunc_struve_types, 2, 2, 1, 0, "struve", ufunc_struve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_tandg_loops[2] +cdef void *ufunc_tandg_ptr[4] +cdef void *ufunc_tandg_data[2] +cdef char ufunc_tandg_types[4] +cdef char *ufunc_tandg_doc = ( + "tandg(x, out=None)\n" + "\n" + "Tangent of angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Tangent at the input.\n" + "\n" + "See Also\n" + "--------\n" + "sindg, cosdg, cotdg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using tangent directly.\n" + "\n" + ">>> x = 180 * np.arange(3)\n" + ">>> sc.tandg(x)\n" + "array([0., 0., 0.])\n" + ">>> np.tan(x * np.pi / 180)\n" + "array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])") +ufunc_tandg_loops[0] = loop_d_d__As_f_f +ufunc_tandg_loops[1] = loop_d_d__As_d_d +ufunc_tandg_types[0] = NPY_FLOAT +ufunc_tandg_types[1] = NPY_FLOAT +ufunc_tandg_types[2] = NPY_DOUBLE +ufunc_tandg_types[3] = NPY_DOUBLE +ufunc_tandg_ptr[2*0] = _func_cephes_tandg 
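# A numerical check of the power series quoted in the struve docstring above,
# truncated at 30 terms; assumption: x is small enough that the truncated
# series has converged to double precision.
import numpy as np
from scipy.special import gamma, struve

def struve_series(v, x, nterms=30):
    # H_v(x) = (x/2)**(v+1) * sum_n (-1)**n (x/2)**(2n) / (Gamma(n+3/2) Gamma(n+v+3/2))
    n = np.arange(nterms)
    terms = (-1.0)**n * (x / 2)**(2 * n) / (gamma(n + 1.5) * gamma(n + v + 1.5))
    return (x / 2)**(v + 1) * terms.sum()

print(struve_series(1, 2.0))  # ~0.6467637282835622
print(struve(1, 2.0))         # matches the docstring value above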
+ufunc_tandg_ptr[2*0+1] = ("tandg") +ufunc_tandg_ptr[2*1] = _func_cephes_tandg +ufunc_tandg_ptr[2*1+1] = ("tandg") +ufunc_tandg_data[0] = &ufunc_tandg_ptr[2*0] +ufunc_tandg_data[1] = &ufunc_tandg_ptr[2*1] +tandg = np.PyUFunc_FromFuncAndData(ufunc_tandg_loops, ufunc_tandg_data, ufunc_tandg_types, 2, 1, 1, 0, "tandg", ufunc_tandg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_tklmbda_loops[2] +cdef void *ufunc_tklmbda_ptr[4] +cdef void *ufunc_tklmbda_data[2] +cdef char ufunc_tklmbda_types[6] +cdef char *ufunc_tklmbda_doc = ( + "tklmbda(x, lmbda, out=None)\n" + "\n" + "Cumulative distribution function of the Tukey lambda distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x, lmbda : array_like\n" + " Parameters\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cdf : scalar or ndarray\n" + " Value of the Tukey lambda CDF\n" + "\n" + "See Also\n" + "--------\n" + "scipy.stats.tukeylambda : Tukey lambda distribution\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import tklmbda, expit\n" + "\n" + "Compute the cumulative distribution function (CDF) of the Tukey lambda\n" + "distribution at several ``x`` values for `lmbda` = -1.5.\n" + "\n" + ">>> x = np.linspace(-2, 2, 9)\n" + ">>> x\n" + "array([-2. , -1.5, -1. , -0.5, 0. , 0.5, 1. , 1.5, 2. ])\n" + ">>> tklmbda(x, -1.5)\n" + "array([0.34688734, 0.3786554 , 0.41528805, 0.45629737, 0.5 ,\n" + " 0.54370263, 0.58471195, 0.6213446 , 0.65311266])\n" + "\n" + "When `lmbda` is 0, the function is the logistic sigmoid function,\n" + "which is implemented in `scipy.special` as `expit`.\n" + "\n" + ">>> tklmbda(x, 0)\n" + "array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 ,\n" + " 0.62245933, 0.73105858, 0.81757448, 0.88079708])\n" + ">>> expit(x)\n" + "array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 ,\n" + " 0.62245933, 0.73105858, 0.81757448, 0.88079708])\n" + "\n" + "When `lmbda` is 1, the Tukey lambda distribution is uniform on the\n" + "interval [-1, 1], so the CDF increases linearly.\n" + "\n" + ">>> t = np.linspace(-1, 1, 9)\n" + ">>> tklmbda(t, 1)\n" + "array([0. , 0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875, 1. ])\n" + "\n" + "In the following, we generate plots for several values of `lmbda`.\n" + "\n" + "The first figure shows graphs for `lmbda` <= 0.\n" + "\n" + ">>> styles = ['-', '-.', '--', ':']\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-12, 12, 500)\n" + ">>> for k, lmbda in enumerate([-1.0, -0.5, 0.0]):\n" + "... y = tklmbda(x, lmbda)\n" + "... ax.plot(x, y, styles[k], label=rf'$\\lambda$ = {lmbda:-4.1f}')\n" + "\n" + ">>> ax.set_title(r'tklmbda(x, $\\lambda$)')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.legend(framealpha=1, shadow=True)\n" + ">>> ax.grid(True)\n" + "\n" + "The second figure shows graphs for `lmbda` > 0. The dots in the\n" + "graphs show the bounds of the support of the distribution.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-4.2, 4.2, 500)\n" + ">>> lmbdas = [0.25, 0.5, 1.0, 1.5]\n" + ">>> for k, lmbda in enumerate(lmbdas):\n" + "... y = tklmbda(x, lmbda)\n" + "... ax.plot(x, y, styles[k], label=fr'$\\lambda$ = {lmbda}')\n" + "\n" + ">>> ax.set_prop_cycle(None)\n" + ">>> for lmbda in lmbdas:\n" + "... 
ax.plot([-1/lmbda, 1/lmbda], [0, 1], '.', ms=8)\n" + "\n" + ">>> ax.set_title(r'tklmbda(x, $\\lambda$)')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.legend(framealpha=1, shadow=True)\n" + ">>> ax.grid(True)\n" + "\n" + ">>> plt.tight_layout()\n" + ">>> plt.show()\n" + "\n" + "The CDF of the Tukey lambda distribution is also implemented as the\n" + "``cdf`` method of `scipy.stats.tukeylambda`. In the following,\n" + "``tukeylambda.cdf(x, -0.5)`` and ``tklmbda(x, -0.5)`` compute the\n" + "same values:\n" + "\n" + ">>> from scipy.stats import tukeylambda\n" + ">>> x = np.linspace(-2, 2, 9)\n" + "\n" + ">>> tukeylambda.cdf(x, -0.5)\n" + "array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5 ,\n" + " 0.58671839, 0.66458323, 0.72906142, 0.78004843])\n" + "\n" + ">>> tklmbda(x, -0.5)\n" + "array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5 ,\n" + " 0.58671839, 0.66458323, 0.72906142, 0.78004843])\n" + "\n" + "The implementation in ``tukeylambda`` also provides location and scale\n" + "parameters, and other methods such as ``pdf()`` (the probability\n" + "density function) and ``ppf()`` (the inverse of the CDF), so for\n" + "working with the Tukey lambda distribution, ``tukeylambda`` is more\n" + "generally useful. The primary advantage of ``tklmbda`` is that it is\n" + "significantly faster than ``tukeylambda.cdf``.") +ufunc_tklmbda_loops[0] = loop_d_dd__As_ff_f +ufunc_tklmbda_loops[1] = loop_d_dd__As_dd_d +ufunc_tklmbda_types[0] = NPY_FLOAT +ufunc_tklmbda_types[1] = NPY_FLOAT +ufunc_tklmbda_types[2] = NPY_FLOAT +ufunc_tklmbda_types[3] = NPY_DOUBLE +ufunc_tklmbda_types[4] = NPY_DOUBLE +ufunc_tklmbda_types[5] = NPY_DOUBLE +ufunc_tklmbda_ptr[2*0] = _func_cephes_tukeylambdacdf +ufunc_tklmbda_ptr[2*0+1] = ("tklmbda") +ufunc_tklmbda_ptr[2*1] = _func_cephes_tukeylambdacdf +ufunc_tklmbda_ptr[2*1+1] = ("tklmbda") +ufunc_tklmbda_data[0] = &ufunc_tklmbda_ptr[2*0] +ufunc_tklmbda_data[1] = &ufunc_tklmbda_ptr[2*1] +tklmbda = np.PyUFunc_FromFuncAndData(ufunc_tklmbda_loops, ufunc_tklmbda_data, ufunc_tklmbda_types, 2, 2, 1, 0, "tklmbda", ufunc_tklmbda_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_voigt_profile_loops[2] +cdef void *ufunc_voigt_profile_ptr[4] +cdef void *ufunc_voigt_profile_data[2] +cdef char ufunc_voigt_profile_types[8] +cdef char *ufunc_voigt_profile_doc = ( + "voigt_profile(x, sigma, gamma, out=None)\n" + "\n" + "Voigt profile.\n" + "\n" + "The Voigt profile is a convolution of a 1-D Normal distribution with\n" + "standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at\n" + "half-maximum ``gamma``.\n" + "\n" + "If ``sigma = 0``, PDF of Cauchy distribution is returned.\n" + "Conversely, if ``gamma = 0``, PDF of Normal distribution is returned.\n" + "If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``,\n" + "and ``0`` for all other ``x``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument\n" + "sigma : array_like\n" + " The standard deviation of the Normal distribution part\n" + "gamma : array_like\n" + " The half-width at half-maximum of the Cauchy distribution part\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The Voigt profile at the given arguments\n" + "\n" + "See Also\n" + "--------\n" + "wofz : Faddeeva function\n" + "\n" + "Notes\n" + "-----\n" + "It can be expressed in terms of Faddeeva function\n" + "\n" + ".. math:: V(x; \\sigma, \\gamma) = \\frac{Re[w(z)]}{\\sigma\\sqrt{2\\pi}},\n" + ".. 
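# The tklmbda kernel registered above is the CDF; for lmbda != 0 the Tukey
# lambda quantile function has the closed form Q(p) = (p**lam - (1-p)**lam)/lam,
# so tklmbda should map Q(p) back to p. A short check at lambda = 0.5:
import numpy as np
from scipy.special import tklmbda

lam = 0.5
p = np.linspace(0.05, 0.95, 7)
x = (p**lam - (1 - p)**lam) / lam
assert np.allclose(tklmbda(x, lam), p)
print("tklmbda inverts the closed-form Tukey lambda quantile")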
math:: z = \\frac{x + i\\gamma}{\\sqrt{2}\\sigma}\n" + "\n" + "where :math:`w(z)` is the Faddeeva function.\n" + "\n" + "References\n" + "----------\n" + ".. [1] https://en.wikipedia.org/wiki/Voigt_profile\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``.\n" + "\n" + ">>> from scipy.special import voigt_profile\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> voigt_profile(2, 1., 1.)\n" + "0.09071519942627544\n" + "\n" + "Calculate the function at several points by providing a NumPy array\n" + "for `x`.\n" + "\n" + ">>> values = np.array([-2., 0., 5])\n" + ">>> voigt_profile(values, 1., 1.)\n" + "array([0.0907152 , 0.20870928, 0.01388492])\n" + "\n" + "Plot the function for different parameter sets.\n" + "\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> x = np.linspace(-10, 10, 500)\n" + ">>> parameters_list = [(1.5, 0., \"solid\"), (1.3, 0.5, \"dashed\"),\n" + "... (0., 1.8, \"dotted\"), (1., 1., \"dashdot\")]\n" + ">>> for params in parameters_list:\n" + "... sigma, gamma, linestyle = params\n" + "... voigt = voigt_profile(x, sigma, gamma)\n" + "... ax.plot(x, voigt, label=rf\"$\\sigma={sigma},\\, \\gamma={gamma}$\",\n" + "... ls=linestyle)\n" + ">>> ax.legend()\n" + ">>> plt.show()\n" + "\n" + "Verify visually that the Voigt profile indeed arises as the convolution\n" + "of a normal and a Cauchy distribution.\n" + "\n" + ">>> from scipy.signal import convolve\n" + ">>> x, dx = np.linspace(-10, 10, 500, retstep=True)\n" + ">>> def gaussian(x, sigma):\n" + "... return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi))\n" + ">>> def cauchy(x, gamma):\n" + "... return gamma/(np.pi * (np.square(x)+gamma**2))\n" + ">>> sigma = 2\n" + ">>> gamma = 1\n" + ">>> gauss_profile = gaussian(x, sigma)\n" + ">>> cauchy_profile = cauchy(x, gamma)\n" + ">>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode=\"same\")\n" + ">>> voigt = voigt_profile(x, sigma, gamma)\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> ax.plot(x, gauss_profile, label=\"Gauss: $G$\", c='b')\n" + ">>> ax.plot(x, cauchy_profile, label=\"Cauchy: $C$\", c='y', ls=\"dashed\")\n" + ">>> xx = 0.5*(x[1:] + x[:-1]) # midpoints\n" + ">>> ax.plot(xx, convolved[1:], label=\"Convolution: $G * C$\", ls='dashdot',\n" + "... 
c='k')\n" + ">>> ax.plot(x, voigt, label=\"Voigt\", ls='dotted', c='r')\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_voigt_profile_loops[0] = loop_d_ddd__As_fff_f +ufunc_voigt_profile_loops[1] = loop_d_ddd__As_ddd_d +ufunc_voigt_profile_types[0] = NPY_FLOAT +ufunc_voigt_profile_types[1] = NPY_FLOAT +ufunc_voigt_profile_types[2] = NPY_FLOAT +ufunc_voigt_profile_types[3] = NPY_FLOAT +ufunc_voigt_profile_types[4] = NPY_DOUBLE +ufunc_voigt_profile_types[5] = NPY_DOUBLE +ufunc_voigt_profile_types[6] = NPY_DOUBLE +ufunc_voigt_profile_types[7] = NPY_DOUBLE +ufunc_voigt_profile_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile +ufunc_voigt_profile_ptr[2*0+1] = ("voigt_profile") +ufunc_voigt_profile_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile +ufunc_voigt_profile_ptr[2*1+1] = ("voigt_profile") +ufunc_voigt_profile_data[0] = &ufunc_voigt_profile_ptr[2*0] +ufunc_voigt_profile_data[1] = &ufunc_voigt_profile_ptr[2*1] +voigt_profile = np.PyUFunc_FromFuncAndData(ufunc_voigt_profile_loops, ufunc_voigt_profile_data, ufunc_voigt_profile_types, 2, 3, 1, 0, "voigt_profile", ufunc_voigt_profile_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_wofz_loops[2] +cdef void *ufunc_wofz_ptr[4] +cdef void *ufunc_wofz_data[2] +cdef char ufunc_wofz_types[4] +cdef char *ufunc_wofz_doc = ( + "wofz(z, out=None)\n" + "\n" + "Faddeeva function\n" + "\n" + "Returns the value of the Faddeeva function for complex argument::\n" + "\n" + " exp(-z**2) * erfc(-i*z)\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " complex argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the Faddeeva function\n" + "\n" + "See Also\n" + "--------\n" + "dawsn, erf, erfc, erfcx, erfi\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> z = special.wofz(x)\n" + "\n" + ">>> plt.plot(x, z.real, label='wofz(x).real')\n" + ">>> plt.plot(x, z.imag, label='wofz(x).imag')\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.legend(framealpha=1, shadow=True)\n" + ">>> plt.grid(alpha=0.25)\n" + ">>> plt.show()") +ufunc_wofz_loops[0] = loop_D_D__As_F_F +ufunc_wofz_loops[1] = loop_D_D__As_D_D +ufunc_wofz_types[0] = NPY_CFLOAT +ufunc_wofz_types[1] = NPY_CFLOAT +ufunc_wofz_types[2] = NPY_CDOUBLE +ufunc_wofz_types[3] = NPY_CDOUBLE +ufunc_wofz_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_w +ufunc_wofz_ptr[2*0+1] = ("wofz") +ufunc_wofz_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_w +ufunc_wofz_ptr[2*1+1] = ("wofz") +ufunc_wofz_data[0] = &ufunc_wofz_ptr[2*0] +ufunc_wofz_data[1] = &ufunc_wofz_ptr[2*1] +wofz = np.PyUFunc_FromFuncAndData(ufunc_wofz_loops, ufunc_wofz_data, ufunc_wofz_types, 2, 1, 1, 0, "wofz", ufunc_wofz_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_wrightomega_loops[4] +cdef void *ufunc_wrightomega_ptr[8] +cdef void *ufunc_wrightomega_data[4] +cdef char ufunc_wrightomega_types[8] +cdef char *ufunc_wrightomega_doc = ( + "wrightomega(z, out=None)\n" + "\n" + "Wright Omega function.\n" + "\n" + "Defined as the solution to\n" + "\n" + ".. 
math::\n" + "\n" + " \\omega + \\log(\\omega) = z\n" + "\n" + "where :math:`\\log` is the principal branch of the complex logarithm.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Points at which to evaluate the Wright Omega function\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "omega : scalar or ndarray\n" + " Values of the Wright Omega function\n" + "\n" + "See Also\n" + "--------\n" + "lambertw : The Lambert W function\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.19.0\n" + "\n" + "The function can also be defined as\n" + "\n" + ".. math::\n" + "\n" + " \\omega(z) = W_{K(z)}(e^z)\n" + "\n" + "where :math:`K(z) = \\lceil (\\Im(z) - \\pi)/(2\\pi) \\rceil` is the\n" + "unwinding number and :math:`W` is the Lambert W function.\n" + "\n" + "The implementation here is taken from [1]_.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Lawrence, Corless, and Jeffrey, \"Algorithm 917: Complex\n" + " Double-Precision Evaluation of the Wright :math:`\\omega`\n" + " Function.\" ACM Transactions on Mathematical Software,\n" + " 2012. :doi:`10.1145/2168773.2168779`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import wrightomega, lambertw\n" + "\n" + ">>> wrightomega([-2, -1, 0, 1, 2])\n" + "array([0.12002824, 0.27846454, 0.56714329, 1. , 1.5571456 ])\n" + "\n" + "Complex input:\n" + "\n" + ">>> wrightomega(3 + 5j)\n" + "(1.5804428632097158+3.8213626783287937j)\n" + "\n" + "Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``:\n" + "\n" + ">>> w = -5 + 4j\n" + ">>> wrightomega(w + np.log(w))\n" + "(-5+4j)\n" + "\n" + "Verify the connection to ``lambertw``:\n" + "\n" + ">>> z = 0.5 + 3j\n" + ">>> wrightomega(z)\n" + "(0.0966015889280649+1.4937828458191993j)\n" + ">>> lambertw(np.exp(z))\n" + "(0.09660158892806493+1.4937828458191993j)\n" + "\n" + ">>> z = 0.5 + 4j\n" + ">>> wrightomega(z)\n" + "(-0.3362123489037213+2.282986001579032j)\n" + ">>> lambertw(np.exp(z), k=1)\n" + "(-0.33621234890372115+2.282986001579032j)") +ufunc_wrightomega_loops[0] = loop_d_d__As_f_f +ufunc_wrightomega_loops[1] = loop_d_d__As_d_d +ufunc_wrightomega_loops[2] = loop_D_D__As_F_F +ufunc_wrightomega_loops[3] = loop_D_D__As_D_D +ufunc_wrightomega_types[0] = NPY_FLOAT +ufunc_wrightomega_types[1] = NPY_FLOAT +ufunc_wrightomega_types[2] = NPY_DOUBLE +ufunc_wrightomega_types[3] = NPY_DOUBLE +ufunc_wrightomega_types[4] = NPY_CFLOAT +ufunc_wrightomega_types[5] = NPY_CFLOAT +ufunc_wrightomega_types[6] = NPY_CDOUBLE +ufunc_wrightomega_types[7] = NPY_CDOUBLE +ufunc_wrightomega_ptr[2*0] = scipy.special._ufuncs_cxx._export_wrightomega_real +ufunc_wrightomega_ptr[2*0+1] = ("wrightomega") +ufunc_wrightomega_ptr[2*1] = scipy.special._ufuncs_cxx._export_wrightomega_real +ufunc_wrightomega_ptr[2*1+1] = ("wrightomega") +ufunc_wrightomega_ptr[2*2] = scipy.special._ufuncs_cxx._export_wrightomega +ufunc_wrightomega_ptr[2*2+1] = ("wrightomega") +ufunc_wrightomega_ptr[2*3] = scipy.special._ufuncs_cxx._export_wrightomega +ufunc_wrightomega_ptr[2*3+1] = ("wrightomega") +ufunc_wrightomega_data[0] = &ufunc_wrightomega_ptr[2*0] +ufunc_wrightomega_data[1] = &ufunc_wrightomega_ptr[2*1] +ufunc_wrightomega_data[2] = &ufunc_wrightomega_ptr[2*2] +ufunc_wrightomega_data[3] = &ufunc_wrightomega_ptr[2*3] +wrightomega = np.PyUFunc_FromFuncAndData(ufunc_wrightomega_loops, ufunc_wrightomega_data, ufunc_wrightomega_types, 4, 1, 1, 0, "wrightomega", ufunc_wrightomega_doc, 0) + +cdef 
np.PyUFuncGenericFunction ufunc_xlog1py_loops[4] +cdef void *ufunc_xlog1py_ptr[8] +cdef void *ufunc_xlog1py_data[4] +cdef char ufunc_xlog1py_types[12] +cdef char *ufunc_xlog1py_doc = ( + "xlog1py(x, y, out=None)\n" + "\n" + "Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Multiplier\n" + "y : array_like\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "z : scalar or ndarray\n" + " Computed x*log1p(y)\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.13.0\n" + "\n" + "Examples\n" + "--------\n" + "This example shows how the function can be used to calculate the log of\n" + "the probability mass function for a geometric discrete random variable.\n" + "The probability mass function of the geometric distribution is defined\n" + "as follows:\n" + "\n" + ".. math:: f(k) = (1-p)^{k-1} p\n" + "\n" + "where :math:`p` is the probability of a single success\n" + "and :math:`1-p` is the probability of a single failure\n" + "and :math:`k` is the number of trials to get the first success.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import xlog1py\n" + ">>> p = 0.5\n" + ">>> k = 100\n" + ">>> _pmf = np.power(1 - p, k - 1) * p\n" + ">>> _pmf\n" + "7.888609052210118e-31\n" + "\n" + "If we take k as a relatively large number the value of the probability\n" + "mass function can become very low. In such cases taking the log of the\n" + "pmf would be more suitable as the log function can change the values\n" + "to a scale that is more appropriate to work with.\n" + "\n" + ">>> _log_pmf = xlog1py(k - 1, -p) + np.log(p)\n" + ">>> _log_pmf\n" + "-69.31471805599453\n" + "\n" + "We can confirm that we get a value close to the original pmf value by\n" + "taking the exponential of the log pmf.\n" + "\n" + ">>> _orig_pmf = np.exp(_log_pmf)\n" + ">>> np.isclose(_pmf, _orig_pmf)\n" + "True") +ufunc_xlog1py_loops[0] = loop_d_dd__As_ff_f +ufunc_xlog1py_loops[1] = loop_d_dd__As_dd_d +ufunc_xlog1py_loops[2] = loop_D_DD__As_FF_F +ufunc_xlog1py_loops[3] = loop_D_DD__As_DD_D +ufunc_xlog1py_types[0] = NPY_FLOAT +ufunc_xlog1py_types[1] = NPY_FLOAT +ufunc_xlog1py_types[2] = NPY_FLOAT +ufunc_xlog1py_types[3] = NPY_DOUBLE +ufunc_xlog1py_types[4] = NPY_DOUBLE +ufunc_xlog1py_types[5] = NPY_DOUBLE +ufunc_xlog1py_types[6] = NPY_CFLOAT +ufunc_xlog1py_types[7] = NPY_CFLOAT +ufunc_xlog1py_types[8] = NPY_CFLOAT +ufunc_xlog1py_types[9] = NPY_CDOUBLE +ufunc_xlog1py_types[10] = NPY_CDOUBLE +ufunc_xlog1py_types[11] = NPY_CDOUBLE +ufunc_xlog1py_ptr[2*0] = _func_xlog1py[double] +ufunc_xlog1py_ptr[2*0+1] = ("xlog1py") +ufunc_xlog1py_ptr[2*1] = _func_xlog1py[double] +ufunc_xlog1py_ptr[2*1+1] = ("xlog1py") +ufunc_xlog1py_ptr[2*2] = _func_xlog1py[double_complex] +ufunc_xlog1py_ptr[2*2+1] = ("xlog1py") +ufunc_xlog1py_ptr[2*3] = _func_xlog1py[double_complex] +ufunc_xlog1py_ptr[2*3+1] = ("xlog1py") +ufunc_xlog1py_data[0] = &ufunc_xlog1py_ptr[2*0] +ufunc_xlog1py_data[1] = &ufunc_xlog1py_ptr[2*1] +ufunc_xlog1py_data[2] = &ufunc_xlog1py_ptr[2*2] +ufunc_xlog1py_data[3] = &ufunc_xlog1py_ptr[2*3] +xlog1py = np.PyUFunc_FromFuncAndData(ufunc_xlog1py_loops, ufunc_xlog1py_data, ufunc_xlog1py_types, 4, 2, 1, 0, "xlog1py", ufunc_xlog1py_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_xlogy_loops[4] +cdef void *ufunc_xlogy_ptr[8] +cdef void *ufunc_xlogy_data[4] +cdef char ufunc_xlogy_types[12] +cdef char *ufunc_xlogy_doc = ( + "xlogy(x, y, out=None)\n" + "\n" + 
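# The 0 * log1p convention documented above, in action: xlog1py returns 0
# when x == 0 even where log1p(y) diverges, unlike the naive product.
import numpy as np
from scipy.special import xlog1py

print(xlog1py(0.0, -1.0))  # 0.0, although log1p(-1) = -inf
with np.errstate(divide="ignore", invalid="ignore"):
    print(0.0 * np.log1p(-1.0))  # nan from the naive expression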
"Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Multiplier\n" + "y : array_like\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "z : scalar or ndarray\n" + " Computed x*log(y)\n" + "\n" + "Notes\n" + "-----\n" + "The log function used in the computation is the natural log.\n" + "\n" + ".. versionadded:: 0.13.0\n" + "\n" + "Examples\n" + "--------\n" + "We can use this function to calculate the binary logistic loss also\n" + "known as the binary cross entropy. This loss function is used for\n" + "binary classification problems and is defined as:\n" + "\n" + ".. math::\n" + " L = 1/n * \\sum_{i=0}^n -(y_i*log(y\\_pred_i) + (1-y_i)*log(1-y\\_pred_i))\n" + "\n" + "We can define the parameters `x` and `y` as y and y_pred respectively.\n" + "y is the array of the actual labels which over here can be either 0 or 1.\n" + "y_pred is the array of the predicted probabilities with respect to\n" + "the positive class (1).\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import xlogy\n" + ">>> y = np.array([0, 1, 0, 1, 1, 0])\n" + ">>> y_pred = np.array([0.3, 0.8, 0.4, 0.7, 0.9, 0.2])\n" + ">>> n = len(y)\n" + ">>> loss = -(xlogy(y, y_pred) + xlogy(1 - y, 1 - y_pred)).sum()\n" + ">>> loss /= n\n" + ">>> loss\n" + "0.29597052165495025\n" + "\n" + "A lower loss is usually better as it indicates that the predictions are\n" + "similar to the actual labels. In this example since our predicted\n" + "probabilities are close to the actual labels, we get an overall loss\n" + "that is reasonably low and appropriate.") +ufunc_xlogy_loops[0] = loop_d_dd__As_ff_f +ufunc_xlogy_loops[1] = loop_d_dd__As_dd_d +ufunc_xlogy_loops[2] = loop_D_DD__As_FF_F +ufunc_xlogy_loops[3] = loop_D_DD__As_DD_D +ufunc_xlogy_types[0] = NPY_FLOAT +ufunc_xlogy_types[1] = NPY_FLOAT +ufunc_xlogy_types[2] = NPY_FLOAT +ufunc_xlogy_types[3] = NPY_DOUBLE +ufunc_xlogy_types[4] = NPY_DOUBLE +ufunc_xlogy_types[5] = NPY_DOUBLE +ufunc_xlogy_types[6] = NPY_CFLOAT +ufunc_xlogy_types[7] = NPY_CFLOAT +ufunc_xlogy_types[8] = NPY_CFLOAT +ufunc_xlogy_types[9] = NPY_CDOUBLE +ufunc_xlogy_types[10] = NPY_CDOUBLE +ufunc_xlogy_types[11] = NPY_CDOUBLE +ufunc_xlogy_ptr[2*0] = _func_xlogy[double] +ufunc_xlogy_ptr[2*0+1] = ("xlogy") +ufunc_xlogy_ptr[2*1] = _func_xlogy[double] +ufunc_xlogy_ptr[2*1+1] = ("xlogy") +ufunc_xlogy_ptr[2*2] = _func_xlogy[double_complex] +ufunc_xlogy_ptr[2*2+1] = ("xlogy") +ufunc_xlogy_ptr[2*3] = _func_xlogy[double_complex] +ufunc_xlogy_ptr[2*3+1] = ("xlogy") +ufunc_xlogy_data[0] = &ufunc_xlogy_ptr[2*0] +ufunc_xlogy_data[1] = &ufunc_xlogy_ptr[2*1] +ufunc_xlogy_data[2] = &ufunc_xlogy_ptr[2*2] +ufunc_xlogy_data[3] = &ufunc_xlogy_ptr[2*3] +xlogy = np.PyUFunc_FromFuncAndData(ufunc_xlogy_loops, ufunc_xlogy_data, ufunc_xlogy_types, 4, 2, 1, 0, "xlogy", ufunc_xlogy_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_y0_loops[2] +cdef void *ufunc_y0_ptr[4] +cdef void *ufunc_y0_data[2] +cdef char ufunc_y0_types[4] +cdef char *ufunc_y0_doc = ( + "y0(x, out=None)\n" + "\n" + "Bessel function of the second kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function of the second kind of order 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "j0: 
Bessel function of the first kind of order 0\n" + "yv: Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 5] and (5, infinity). In the\n" + "first interval a rational approximation :math:`R(x)` is employed to\n" + "compute,\n" + "\n" + ".. math::\n" + "\n" + " Y_0(x) = R(x) + \\frac{2 \\log(x) J_0(x)}{\\pi},\n" + "\n" + "where :math:`J_0` is the Bessel function of the first kind of order 0.\n" + "\n" + "In the second interval, the Hankel asymptotic expansion is employed with\n" + "two rational functions of degree 6/6 and 7/7.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `y0`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import y0\n" + ">>> y0(1.)\n" + "0.08825696421567697\n" + "\n" + "Calculate at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> y0(np.array([0.5, 2., 3.]))\n" + "array([-0.44451873, 0.51037567, 0.37685001])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = y0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_y0_loops[0] = loop_d_d__As_f_f +ufunc_y0_loops[1] = loop_d_d__As_d_d +ufunc_y0_types[0] = NPY_FLOAT +ufunc_y0_types[1] = NPY_FLOAT +ufunc_y0_types[2] = NPY_DOUBLE +ufunc_y0_types[3] = NPY_DOUBLE +ufunc_y0_ptr[2*0] = _func_cephes_y0 +ufunc_y0_ptr[2*0+1] = ("y0") +ufunc_y0_ptr[2*1] = _func_cephes_y0 +ufunc_y0_ptr[2*1+1] = ("y0") +ufunc_y0_data[0] = &ufunc_y0_ptr[2*0] +ufunc_y0_data[1] = &ufunc_y0_ptr[2*1] +y0 = np.PyUFunc_FromFuncAndData(ufunc_y0_loops, ufunc_y0_data, ufunc_y0_types, 2, 1, 1, 0, "y0", ufunc_y0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_y1_loops[2] +cdef void *ufunc_y1_ptr[4] +cdef void *ufunc_y1_data[2] +cdef char ufunc_y1_types[4] +cdef char *ufunc_y1_doc = ( + "y1(x, out=None)\n" + "\n" + "Bessel function of the second kind of order 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function of the second kind of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "j1: Bessel function of the first kind of order 1\n" + "yn: Bessel function of the second kind\n" + "yv: Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 8] and (8, infinity). In the\n" + "first interval a 25 term Chebyshev expansion is used, and computing\n" + ":math:`J_1` (the Bessel function of the first kind) is required. In the\n" + "second, the asymptotic trigonometric representation is employed using two\n" + "rational functions of degree 5/5.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `y1`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import y1\n" + ">>> y1(1.)\n" + "-0.7812128213002888\n" + "\n" + "Calculate at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> y1(np.array([0.5, 2., 3.]))\n" + "array([-1.47147239, -0.10703243, 0.32467442])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = y1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_y1_loops[0] = loop_d_d__As_f_f +ufunc_y1_loops[1] = loop_d_d__As_d_d +ufunc_y1_types[0] = NPY_FLOAT +ufunc_y1_types[1] = NPY_FLOAT +ufunc_y1_types[2] = NPY_DOUBLE +ufunc_y1_types[3] = NPY_DOUBLE +ufunc_y1_ptr[2*0] = _func_cephes_y1 +ufunc_y1_ptr[2*0+1] = ("y1") +ufunc_y1_ptr[2*1] = _func_cephes_y1 +ufunc_y1_ptr[2*1+1] = ("y1") +ufunc_y1_data[0] = &ufunc_y1_ptr[2*0] +ufunc_y1_data[1] = &ufunc_y1_ptr[2*1] +y1 = np.PyUFunc_FromFuncAndData(ufunc_y1_loops, ufunc_y1_data, ufunc_y1_types, 2, 1, 1, 0, "y1", ufunc_y1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_yn_loops[3] +cdef void *ufunc_yn_ptr[6] +cdef void *ufunc_yn_data[3] +cdef char ufunc_yn_types[9] +cdef char *ufunc_yn_doc = ( + "yn(n, x, out=None)\n" + "\n" + "Bessel function of the second kind of integer order and real argument.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Order (integer).\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function, :math:`Y_n(x)`.\n" + "\n" + "See Also\n" + "--------\n" + "yv : For real order and real or complex argument.\n" + "y0: faster implementation of this function for order 0\n" + "y1: faster implementation of this function for order 1\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `yn`.\n" + "\n" + "The function is evaluated by forward recurrence on `n`, starting with\n" + "values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,\n" + "the routine for `y0` or `y1` is called directly.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function of order 0 at one point.\n" + "\n" + ">>> from scipy.special import yn\n" + ">>> yn(0, 1.)\n" + "0.08825696421567697\n" + "\n" + "Evaluate the function at one point for different orders.\n" + "\n" + ">>> yn(0, 1.), yn(1, 1.), yn(2, 1.)\n" + "(0.08825696421567697, -0.7812128213002888, -1.6506826068162546)\n" + "\n" + "The evaluation for different orders can be carried out in one call by\n" + "providing a list or NumPy array as argument for the `n` parameter:\n" + "\n" + ">>> yn([0, 1, 2], 1.)\n" + "array([ 0.08825696, -0.78121282, -1.65068261])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `x`.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0.5, 3., 8.])\n" + ">>> yn(0, points)\n" + "array([-0.44451873, 0.37685001, 0.22352149])\n" + "\n" + "If `x` is an array, the order parameter `n` must be broadcastable to\n" + "the correct shape if different orders are to be computed in one call.\n" + "To calculate the orders 0 and 1 for a 1D array:\n" + "\n" + ">>> orders = np.array([[0], [1]])\n" + ">>> orders.shape\n" + "(2, 1)\n" + "\n" + ">>> yn(orders, points)\n" + "array([[-0.44451873, 0.37685001, 0.22352149],\n" + " [-1.47147239, 0.32467442, -0.15806046]])\n" + "\n" + "Plot the functions of order 0 to 3 from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> for i in range(4):\n" + "... ax.plot(x, yn(i, x), label=f'$Y_{i!r}$')\n" + ">>> ax.set_ylim(-3, 1)\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_yn_loops[0] = loop_d_pd__As_pd_d +ufunc_yn_loops[1] = loop_d_dd__As_ff_f +ufunc_yn_loops[2] = loop_d_dd__As_dd_d +ufunc_yn_types[0] = NPY_INTP +ufunc_yn_types[1] = NPY_DOUBLE +ufunc_yn_types[2] = NPY_DOUBLE +ufunc_yn_types[3] = NPY_FLOAT +ufunc_yn_types[4] = NPY_FLOAT +ufunc_yn_types[5] = NPY_FLOAT +ufunc_yn_types[6] = NPY_DOUBLE +ufunc_yn_types[7] = NPY_DOUBLE +ufunc_yn_types[8] = NPY_DOUBLE +ufunc_yn_ptr[2*0] = _func_cephes_yn_wrap +ufunc_yn_ptr[2*0+1] = ("yn") +ufunc_yn_ptr[2*1] = _func_yn_unsafe +ufunc_yn_ptr[2*1+1] = ("yn") +ufunc_yn_ptr[2*2] = _func_yn_unsafe +ufunc_yn_ptr[2*2+1] = ("yn") +ufunc_yn_data[0] = &ufunc_yn_ptr[2*0] +ufunc_yn_data[1] = &ufunc_yn_ptr[2*1] +ufunc_yn_data[2] = &ufunc_yn_ptr[2*2] +yn = np.PyUFunc_FromFuncAndData(ufunc_yn_loops, ufunc_yn_data, ufunc_yn_types, 3, 2, 1, 0, "yn", ufunc_yn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_zetac_loops[2] +cdef void *ufunc_zetac_ptr[4] +cdef void *ufunc_zetac_data[2] +cdef char ufunc_zetac_types[4] +cdef char *ufunc_zetac_doc = ( + "zetac(x, out=None)\n" + "\n" + "Riemann zeta function minus 1.\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,\n" + "\n" + "where ``x > 1``. For ``x < 1`` the analytic continuation is\n" + "computed. For more information on the Riemann zeta function, see\n" + "[dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like of float\n" + " Values at which to compute zeta(x) - 1 (must be real).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of zeta(x) - 1.\n" + "\n" + "See Also\n" + "--------\n" + "zeta\n" + "\n" + "References\n" + "----------\n" + ".. 
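# The yn Notes above describe evaluation by forward recurrence from y0 and
# y1, i.e. Y_{n+1}(x) = (2*n/x) * Y_n(x) - Y_{n-1}(x). A short verification:
from scipy.special import y0, y1, yn

x = 2.5
y_prev, y_cur = y0(x), y1(x)
for n in range(1, 6):
    y_next = (2 * n / x) * y_cur - y_prev
    assert abs(y_next - yn(n + 1, x)) < 1e-10
    y_prev, y_cur = y_cur, y_next
print("forward recurrence matches yn through order 6")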
[dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/25\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import zetac, zeta\n" + "\n" + "Some special values:\n" + "\n" + ">>> zetac(2), np.pi**2/6 - 1\n" + "(0.64493406684822641, 0.6449340668482264)\n" + "\n" + ">>> zetac(-1), -1.0/12 - 1\n" + "(-1.0833333333333333, -1.0833333333333333)\n" + "\n" + "Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:\n" + "\n" + ">>> zetac(60), zeta(60) - 1\n" + "(8.673617380119933e-19, 0.0)") +ufunc_zetac_loops[0] = loop_d_d__As_f_f +ufunc_zetac_loops[1] = loop_d_d__As_d_d +ufunc_zetac_types[0] = NPY_FLOAT +ufunc_zetac_types[1] = NPY_FLOAT +ufunc_zetac_types[2] = NPY_DOUBLE +ufunc_zetac_types[3] = NPY_DOUBLE +ufunc_zetac_ptr[2*0] = _func_cephes_zetac +ufunc_zetac_ptr[2*0+1] = ("zetac") +ufunc_zetac_ptr[2*1] = _func_cephes_zetac +ufunc_zetac_ptr[2*1+1] = ("zetac") +ufunc_zetac_data[0] = &ufunc_zetac_ptr[2*0] +ufunc_zetac_data[1] = &ufunc_zetac_ptr[2*1] +zetac = np.PyUFunc_FromFuncAndData(ufunc_zetac_loops, ufunc_zetac_data, ufunc_zetac_types, 2, 1, 1, 0, "zetac", ufunc_zetac_doc, 0) + +from ._special_ufuncs import (_cospi, _lambertw, _scaled_exp1, _sinpi, _spherical_jn, _spherical_jn_d, _spherical_yn, _spherical_yn_d, _spherical_in, _spherical_in_d, _spherical_kn, _spherical_kn_d, airy, airye, bei, beip, ber, berp, binom, exp1, expi, expit, exprel, gamma, gammaln, hankel1, hankel1e, hankel2, hankel2e, hyp2f1, it2i0k0, it2j0y0, it2struve0, itairy, iti0k0, itj0y0, itmodstruve0, itstruve0, iv, _iv_ratio, ive, jv, jve, kei, keip, kelvin, ker, kerp, kv, kve, log_expit, log_wright_bessel, loggamma, logit, mathieu_a, mathieu_b, mathieu_cem, mathieu_modcem1, mathieu_modcem2, mathieu_modsem1, mathieu_modsem2, mathieu_sem, modfresnelm, modfresnelp, obl_ang1, obl_ang1_cv, obl_cv, obl_rad1, obl_rad1_cv, obl_rad2, obl_rad2_cv, pbdv, pbvv, pbwa, pro_ang1, pro_ang1_cv, pro_cv, pro_rad1, pro_rad1_cv, pro_rad2, pro_rad2_cv, psi, rgamma, sph_harm, wright_bessel, yv, yve, _zeta) + +# +# Aliases +# +jn = jv diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pxd b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a5038ec6f6ef138cf9c0e295a77ece1c0eca005b --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pxd @@ -0,0 +1,139 @@ +from . 
cimport sf_error +cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) noexcept nogil +cdef void *_export_beta_pdf_float +cdef void *_export_beta_pdf_double +cdef void *_export_beta_ppf_float +cdef void *_export_beta_ppf_double +cdef void *_export_binom_cdf_float +cdef void *_export_binom_cdf_double +cdef void *_export_binom_isf_float +cdef void *_export_binom_isf_double +cdef void *_export_binom_pmf_float +cdef void *_export_binom_pmf_double +cdef void *_export_binom_ppf_float +cdef void *_export_binom_ppf_double +cdef void *_export_binom_sf_float +cdef void *_export_binom_sf_double +cdef void *_export_hypergeom_cdf_float +cdef void *_export_hypergeom_cdf_double +cdef void *_export_hypergeom_mean_float +cdef void *_export_hypergeom_mean_double +cdef void *_export_hypergeom_pmf_float +cdef void *_export_hypergeom_pmf_double +cdef void *_export_hypergeom_sf_float +cdef void *_export_hypergeom_sf_double +cdef void *_export_hypergeom_skewness_float +cdef void *_export_hypergeom_skewness_double +cdef void *_export_hypergeom_variance_float +cdef void *_export_hypergeom_variance_double +cdef void *_export_invgauss_isf_float +cdef void *_export_invgauss_isf_double +cdef void *_export_invgauss_ppf_float +cdef void *_export_invgauss_ppf_double +cdef void *_export_nbinom_cdf_float +cdef void *_export_nbinom_cdf_double +cdef void *_export_nbinom_isf_float +cdef void *_export_nbinom_isf_double +cdef void *_export_nbinom_kurtosis_excess_float +cdef void *_export_nbinom_kurtosis_excess_double +cdef void *_export_nbinom_mean_float +cdef void *_export_nbinom_mean_double +cdef void *_export_nbinom_pmf_float +cdef void *_export_nbinom_pmf_double +cdef void *_export_nbinom_ppf_float +cdef void *_export_nbinom_ppf_double +cdef void *_export_nbinom_sf_float +cdef void *_export_nbinom_sf_double +cdef void *_export_nbinom_skewness_float +cdef void *_export_nbinom_skewness_double +cdef void *_export_nbinom_variance_float +cdef void *_export_nbinom_variance_double +cdef void *_export_ncf_cdf_float +cdef void *_export_ncf_cdf_double +cdef void *_export_ncf_isf_float +cdef void *_export_ncf_isf_double +cdef void *_export_ncf_kurtosis_excess_float +cdef void *_export_ncf_kurtosis_excess_double +cdef void *_export_ncf_mean_float +cdef void *_export_ncf_mean_double +cdef void *_export_ncf_pdf_float +cdef void *_export_ncf_pdf_double +cdef void *_export_ncf_ppf_float +cdef void *_export_ncf_ppf_double +cdef void *_export_ncf_sf_float +cdef void *_export_ncf_sf_double +cdef void *_export_ncf_skewness_float +cdef void *_export_ncf_skewness_double +cdef void *_export_ncf_variance_float +cdef void *_export_ncf_variance_double +cdef void *_export_nct_cdf_float +cdef void *_export_nct_cdf_double +cdef void *_export_nct_isf_float +cdef void *_export_nct_isf_double +cdef void *_export_nct_kurtosis_excess_float +cdef void *_export_nct_kurtosis_excess_double +cdef void *_export_nct_mean_float +cdef void *_export_nct_mean_double +cdef void *_export_nct_ppf_float +cdef void *_export_nct_ppf_double +cdef void *_export_nct_sf_float +cdef void *_export_nct_sf_double +cdef void *_export_nct_skewness_float +cdef void *_export_nct_skewness_double +cdef void *_export_nct_variance_float +cdef void *_export_nct_variance_double +cdef void *_export_ncx2_cdf_float +cdef void *_export_ncx2_cdf_double +cdef void *_export_ncx2_isf_float +cdef void *_export_ncx2_isf_double +cdef void *_export_ncx2_pdf_float +cdef void *_export_ncx2_pdf_double +cdef void *_export_ncx2_ppf_float +cdef void *_export_ncx2_ppf_double +cdef void 
*_export_ncx2_sf_float +cdef void *_export_ncx2_sf_double +cdef void *_export_skewnorm_cdf_float +cdef void *_export_skewnorm_cdf_double +cdef void *_export_skewnorm_isf_float +cdef void *_export_skewnorm_isf_double +cdef void *_export_skewnorm_ppf_float +cdef void *_export_skewnorm_ppf_double +cdef void *_export__stirling2_inexact +cdef void *_export_ibeta_float +cdef void *_export_ibeta_double +cdef void *_export_ibetac_float +cdef void *_export_ibetac_double +cdef void *_export_ibetac_inv_float +cdef void *_export_ibetac_inv_double +cdef void *_export_ibeta_inv_float +cdef void *_export_ibeta_inv_double +cdef void *_export_faddeeva_dawsn +cdef void *_export_faddeeva_dawsn_complex +cdef void *_export_fellint_RC +cdef void *_export_cellint_RC +cdef void *_export_fellint_RD +cdef void *_export_cellint_RD +cdef void *_export_fellint_RF +cdef void *_export_cellint_RF +cdef void *_export_fellint_RG +cdef void *_export_cellint_RG +cdef void *_export_fellint_RJ +cdef void *_export_cellint_RJ +cdef void *_export_faddeeva_erf +cdef void *_export_faddeeva_erfc_complex +cdef void *_export_faddeeva_erfcx +cdef void *_export_faddeeva_erfcx_complex +cdef void *_export_faddeeva_erfi +cdef void *_export_faddeeva_erfi_complex +cdef void *_export_erfinv_float +cdef void *_export_erfinv_double +cdef void *_export_hyp1f1_double +cdef void *_export_faddeeva_log_ndtr +cdef void *_export_faddeeva_log_ndtr_complex +cdef void *_export_faddeeva_ndtr +cdef void *_export_powm1_float +cdef void *_export_powm1_double +cdef void *_export_faddeeva_voigt_profile +cdef void *_export_faddeeva_w +cdef void *_export_wrightomega +cdef void *_export_wrightomega_real \ No newline at end of file diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx new file mode 100644 index 0000000000000000000000000000000000000000..4874418a8509c21e04979ec3e796936943397afd --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx @@ -0,0 +1,418 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! 
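# The .pxd above declares module-level void pointers (_export_*) that the
# generated .pyx below assigns C function addresses to; _ufuncs.pyx then
# cimports them (e.g. scipy.special._ufuncs_cxx._export_faddeeva_w earlier in
# this diff). A hedged pure-Python analogue of sharing a raw C function
# pointer, using ctypes on POSIX (illustrative of the idea only; the real
# mechanism is a compile-time cimport, not ctypes):
import ctypes

proc = ctypes.CDLL(None)  # symbols already loaded into this process (POSIX)
cos_addr = ctypes.cast(proc.cos, ctypes.c_void_p).value  # "exported" pointer
CosFunc = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
cos_again = CosFunc(cos_addr)  # reconstructed from the bare address
print(cos_again(0.0))  # 1.0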
+ +from libc.math cimport NAN + +include "_ufuncs_extra_code_common.pxi" + +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_beta_pdf_float "beta_pdf_float"(float, float, float) noexcept nogil +cdef void *_export_beta_pdf_float = _func_beta_pdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_beta_pdf_double "beta_pdf_double"(double, double, double) noexcept nogil +cdef void *_export_beta_pdf_double = _func_beta_pdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_beta_ppf_float "beta_ppf_float"(float, float, float) noexcept nogil +cdef void *_export_beta_ppf_float = _func_beta_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_beta_ppf_double "beta_ppf_double"(double, double, double) noexcept nogil +cdef void *_export_beta_ppf_double = _func_beta_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_binom_cdf_float "binom_cdf_float"(float, float, float) noexcept nogil +cdef void *_export_binom_cdf_float = _func_binom_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom_cdf_double "binom_cdf_double"(double, double, double) noexcept nogil +cdef void *_export_binom_cdf_double = _func_binom_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_binom_isf_float "binom_isf_float"(float, float, float) noexcept nogil +cdef void *_export_binom_isf_float = _func_binom_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom_isf_double "binom_isf_double"(double, double, double) noexcept nogil +cdef void *_export_binom_isf_double = _func_binom_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_binom_pmf_float "binom_pmf_float"(float, float, float) noexcept nogil +cdef void *_export_binom_pmf_float = _func_binom_pmf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom_pmf_double "binom_pmf_double"(double, double, double) noexcept nogil +cdef void *_export_binom_pmf_double = _func_binom_pmf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_binom_ppf_float "binom_ppf_float"(float, float, float) noexcept nogil +cdef void *_export_binom_ppf_float = _func_binom_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom_ppf_double "binom_ppf_double"(double, double, double) noexcept nogil +cdef void *_export_binom_ppf_double = _func_binom_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_binom_sf_float "binom_sf_float"(float, float, float) noexcept nogil +cdef void *_export_binom_sf_float = _func_binom_sf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom_sf_double "binom_sf_double"(double, double, double) noexcept nogil +cdef void *_export_binom_sf_double = _func_binom_sf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_hypergeom_cdf_float "hypergeom_cdf_float"(float, float, float, float) noexcept nogil +cdef void *_export_hypergeom_cdf_float = _func_hypergeom_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hypergeom_cdf_double "hypergeom_cdf_double"(double, double, double, double) noexcept nogil +cdef void *_export_hypergeom_cdf_double = _func_hypergeom_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_hypergeom_mean_float "hypergeom_mean_float"(float, float, float) noexcept nogil +cdef void *_export_hypergeom_mean_float = _func_hypergeom_mean_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hypergeom_mean_double "hypergeom_mean_double"(double, double, double) noexcept 
nogil +cdef void *_export_hypergeom_mean_double = _func_hypergeom_mean_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_hypergeom_pmf_float "hypergeom_pmf_float"(float, float, float, float) noexcept nogil +cdef void *_export_hypergeom_pmf_float = _func_hypergeom_pmf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hypergeom_pmf_double "hypergeom_pmf_double"(double, double, double, double) noexcept nogil +cdef void *_export_hypergeom_pmf_double = _func_hypergeom_pmf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_hypergeom_sf_float "hypergeom_sf_float"(float, float, float, float) noexcept nogil +cdef void *_export_hypergeom_sf_float = _func_hypergeom_sf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hypergeom_sf_double "hypergeom_sf_double"(double, double, double, double) noexcept nogil +cdef void *_export_hypergeom_sf_double = _func_hypergeom_sf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_hypergeom_skewness_float "hypergeom_skewness_float"(float, float, float) noexcept nogil +cdef void *_export_hypergeom_skewness_float = _func_hypergeom_skewness_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hypergeom_skewness_double "hypergeom_skewness_double"(double, double, double) noexcept nogil +cdef void *_export_hypergeom_skewness_double = _func_hypergeom_skewness_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_hypergeom_variance_float "hypergeom_variance_float"(float, float, float) noexcept nogil +cdef void *_export_hypergeom_variance_float = _func_hypergeom_variance_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hypergeom_variance_double "hypergeom_variance_double"(double, double, double) noexcept nogil +cdef void *_export_hypergeom_variance_double = _func_hypergeom_variance_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_invgauss_isf_float "invgauss_isf_float"(float, float, float) noexcept nogil +cdef void *_export_invgauss_isf_float = _func_invgauss_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_invgauss_isf_double "invgauss_isf_double"(double, double, double) noexcept nogil +cdef void *_export_invgauss_isf_double = _func_invgauss_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_invgauss_ppf_float "invgauss_ppf_float"(float, float, float) noexcept nogil +cdef void *_export_invgauss_ppf_float = _func_invgauss_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_invgauss_ppf_double "invgauss_ppf_double"(double, double, double) noexcept nogil +cdef void *_export_invgauss_ppf_double = _func_invgauss_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_cdf_float "nbinom_cdf_float"(float, float, float) noexcept nogil +cdef void *_export_nbinom_cdf_float = _func_nbinom_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_cdf_double "nbinom_cdf_double"(double, double, double) noexcept nogil +cdef void *_export_nbinom_cdf_double = _func_nbinom_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_isf_float "nbinom_isf_float"(float, float, float) noexcept nogil +cdef void *_export_nbinom_isf_float = _func_nbinom_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_isf_double "nbinom_isf_double"(double, double, double) noexcept nogil +cdef void *_export_nbinom_isf_double = _func_nbinom_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float 
_func_nbinom_kurtosis_excess_float "nbinom_kurtosis_excess_float"(float, float) noexcept nogil +cdef void *_export_nbinom_kurtosis_excess_float = _func_nbinom_kurtosis_excess_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_kurtosis_excess_double "nbinom_kurtosis_excess_double"(double, double) noexcept nogil +cdef void *_export_nbinom_kurtosis_excess_double = _func_nbinom_kurtosis_excess_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_mean_float "nbinom_mean_float"(float, float) noexcept nogil +cdef void *_export_nbinom_mean_float = _func_nbinom_mean_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_mean_double "nbinom_mean_double"(double, double) noexcept nogil +cdef void *_export_nbinom_mean_double = _func_nbinom_mean_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_pmf_float "nbinom_pmf_float"(float, float, float) noexcept nogil +cdef void *_export_nbinom_pmf_float = _func_nbinom_pmf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_pmf_double "nbinom_pmf_double"(double, double, double) noexcept nogil +cdef void *_export_nbinom_pmf_double = _func_nbinom_pmf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_ppf_float "nbinom_ppf_float"(float, float, float) noexcept nogil +cdef void *_export_nbinom_ppf_float = _func_nbinom_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_ppf_double "nbinom_ppf_double"(double, double, double) noexcept nogil +cdef void *_export_nbinom_ppf_double = _func_nbinom_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_sf_float "nbinom_sf_float"(float, float, float) noexcept nogil +cdef void *_export_nbinom_sf_float = _func_nbinom_sf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_sf_double "nbinom_sf_double"(double, double, double) noexcept nogil +cdef void *_export_nbinom_sf_double = _func_nbinom_sf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_skewness_float "nbinom_skewness_float"(float, float) noexcept nogil +cdef void *_export_nbinom_skewness_float = _func_nbinom_skewness_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_skewness_double "nbinom_skewness_double"(double, double) noexcept nogil +cdef void *_export_nbinom_skewness_double = _func_nbinom_skewness_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nbinom_variance_float "nbinom_variance_float"(float, float) noexcept nogil +cdef void *_export_nbinom_variance_float = _func_nbinom_variance_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nbinom_variance_double "nbinom_variance_double"(double, double) noexcept nogil +cdef void *_export_nbinom_variance_double = _func_nbinom_variance_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_cdf_float "ncf_cdf_float"(float, float, float, float) noexcept nogil +cdef void *_export_ncf_cdf_float = _func_ncf_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_cdf_double "ncf_cdf_double"(double, double, double, double) noexcept nogil +cdef void *_export_ncf_cdf_double = _func_ncf_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_isf_float "ncf_isf_float"(float, float, float, float) noexcept nogil +cdef void *_export_ncf_isf_float = _func_ncf_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_isf_double "ncf_isf_double"(double, double, double, double) noexcept nogil 
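# Every kernel in this file is exported in paired float and double variants;
# at the Python level each pair surfaces as two ufunc loops that NumPy selects
# by input dtype. Illustrated with ndtr, whose Faddeeva export appears in the
# .pxd above:
import numpy as np
import scipy.special as sc

print(sc.ndtr(np.float32(0.5)).dtype)  # float32 -> the float loop
print(sc.ndtr(np.float64(0.5)).dtype)  # float64 -> the double loop
print(sc.ndtr.types)                   # all registered loop signatures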
+cdef void *_export_ncf_isf_double = _func_ncf_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_kurtosis_excess_float "ncf_kurtosis_excess_float"(float, float, float) noexcept nogil +cdef void *_export_ncf_kurtosis_excess_float = _func_ncf_kurtosis_excess_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_kurtosis_excess_double "ncf_kurtosis_excess_double"(double, double, double) noexcept nogil +cdef void *_export_ncf_kurtosis_excess_double = _func_ncf_kurtosis_excess_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_mean_float "ncf_mean_float"(float, float, float) noexcept nogil +cdef void *_export_ncf_mean_float = _func_ncf_mean_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_mean_double "ncf_mean_double"(double, double, double) noexcept nogil +cdef void *_export_ncf_mean_double = _func_ncf_mean_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_pdf_float "ncf_pdf_float"(float, float, float, float) noexcept nogil +cdef void *_export_ncf_pdf_float = _func_ncf_pdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_pdf_double "ncf_pdf_double"(double, double, double, double) noexcept nogil +cdef void *_export_ncf_pdf_double = _func_ncf_pdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_ppf_float "ncf_ppf_float"(float, float, float, float) noexcept nogil +cdef void *_export_ncf_ppf_float = _func_ncf_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_ppf_double "ncf_ppf_double"(double, double, double, double) noexcept nogil +cdef void *_export_ncf_ppf_double = _func_ncf_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_sf_float "ncf_sf_float"(float, float, float, float) noexcept nogil +cdef void *_export_ncf_sf_float = _func_ncf_sf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_sf_double "ncf_sf_double"(double, double, double, double) noexcept nogil +cdef void *_export_ncf_sf_double = _func_ncf_sf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_skewness_float "ncf_skewness_float"(float, float, float) noexcept nogil +cdef void *_export_ncf_skewness_float = _func_ncf_skewness_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_skewness_double "ncf_skewness_double"(double, double, double) noexcept nogil +cdef void *_export_ncf_skewness_double = _func_ncf_skewness_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncf_variance_float "ncf_variance_float"(float, float, float) noexcept nogil +cdef void *_export_ncf_variance_float = _func_ncf_variance_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncf_variance_double "ncf_variance_double"(double, double, double) noexcept nogil +cdef void *_export_ncf_variance_double = _func_ncf_variance_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_cdf_float "nct_cdf_float"(float, float, float) noexcept nogil +cdef void *_export_nct_cdf_float = _func_nct_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_cdf_double "nct_cdf_double"(double, double, double) noexcept nogil +cdef void *_export_nct_cdf_double = _func_nct_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_isf_float "nct_isf_float"(float, float, float) noexcept nogil +cdef void *_export_nct_isf_float = _func_nct_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_isf_double "nct_isf_double"(double, double, double) 
noexcept nogil +cdef void *_export_nct_isf_double = _func_nct_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_kurtosis_excess_float "nct_kurtosis_excess_float"(float, float) noexcept nogil +cdef void *_export_nct_kurtosis_excess_float = _func_nct_kurtosis_excess_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_kurtosis_excess_double "nct_kurtosis_excess_double"(double, double) noexcept nogil +cdef void *_export_nct_kurtosis_excess_double = _func_nct_kurtosis_excess_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_mean_float "nct_mean_float"(float, float) noexcept nogil +cdef void *_export_nct_mean_float = _func_nct_mean_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_mean_double "nct_mean_double"(double, double) noexcept nogil +cdef void *_export_nct_mean_double = _func_nct_mean_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_ppf_float "nct_ppf_float"(float, float, float) noexcept nogil +cdef void *_export_nct_ppf_float = _func_nct_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_ppf_double "nct_ppf_double"(double, double, double) noexcept nogil +cdef void *_export_nct_ppf_double = _func_nct_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_sf_float "nct_sf_float"(float, float, float) noexcept nogil +cdef void *_export_nct_sf_float = _func_nct_sf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_sf_double "nct_sf_double"(double, double, double) noexcept nogil +cdef void *_export_nct_sf_double = _func_nct_sf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_skewness_float "nct_skewness_float"(float, float) noexcept nogil +cdef void *_export_nct_skewness_float = _func_nct_skewness_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_skewness_double "nct_skewness_double"(double, double) noexcept nogil +cdef void *_export_nct_skewness_double = _func_nct_skewness_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_nct_variance_float "nct_variance_float"(float, float) noexcept nogil +cdef void *_export_nct_variance_float = _func_nct_variance_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_nct_variance_double "nct_variance_double"(double, double) noexcept nogil +cdef void *_export_nct_variance_double = _func_nct_variance_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncx2_cdf_float "ncx2_cdf_float"(float, float, float) noexcept nogil +cdef void *_export_ncx2_cdf_float = _func_ncx2_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncx2_cdf_double "ncx2_cdf_double"(double, double, double) noexcept nogil +cdef void *_export_ncx2_cdf_double = _func_ncx2_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncx2_isf_float "ncx2_isf_float"(float, float, float) noexcept nogil +cdef void *_export_ncx2_isf_float = _func_ncx2_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncx2_isf_double "ncx2_isf_double"(double, double, double) noexcept nogil +cdef void *_export_ncx2_isf_double = _func_ncx2_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncx2_pdf_float "ncx2_pdf_float"(float, float, float) noexcept nogil +cdef void *_export_ncx2_pdf_float = _func_ncx2_pdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncx2_pdf_double "ncx2_pdf_double"(double, double, double) noexcept nogil +cdef void *_export_ncx2_pdf_double = 
_func_ncx2_pdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncx2_ppf_float "ncx2_ppf_float"(float, float, float) noexcept nogil +cdef void *_export_ncx2_ppf_float = _func_ncx2_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncx2_ppf_double "ncx2_ppf_double"(double, double, double) noexcept nogil +cdef void *_export_ncx2_ppf_double = _func_ncx2_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ncx2_sf_float "ncx2_sf_float"(float, float, float) noexcept nogil +cdef void *_export_ncx2_sf_float = _func_ncx2_sf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ncx2_sf_double "ncx2_sf_double"(double, double, double) noexcept nogil +cdef void *_export_ncx2_sf_double = _func_ncx2_sf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_skewnorm_cdf_float "skewnorm_cdf_float"(float, float, float, float) noexcept nogil +cdef void *_export_skewnorm_cdf_float = _func_skewnorm_cdf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_skewnorm_cdf_double "skewnorm_cdf_double"(double, double, double, double) noexcept nogil +cdef void *_export_skewnorm_cdf_double = _func_skewnorm_cdf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_skewnorm_isf_float "skewnorm_isf_float"(float, float, float, float) noexcept nogil +cdef void *_export_skewnorm_isf_float = _func_skewnorm_isf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_skewnorm_isf_double "skewnorm_isf_double"(double, double, double, double) noexcept nogil +cdef void *_export_skewnorm_isf_double = _func_skewnorm_isf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_skewnorm_ppf_float "skewnorm_ppf_float"(float, float, float, float) noexcept nogil +cdef void *_export_skewnorm_ppf_float = _func_skewnorm_ppf_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_skewnorm_ppf_double "skewnorm_ppf_double"(double, double, double, double) noexcept nogil +cdef void *_export_skewnorm_ppf_double = _func_skewnorm_ppf_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func__stirling2_inexact "_stirling2_inexact"(double, double) noexcept nogil +cdef void *_export__stirling2_inexact = _func__stirling2_inexact +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibeta_float "ibeta_float"(float, float, float) noexcept nogil +cdef void *_export_ibeta_float = _func_ibeta_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibeta_double "ibeta_double"(double, double, double) noexcept nogil +cdef void *_export_ibeta_double = _func_ibeta_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibetac_float "ibetac_float"(float, float, float) noexcept nogil +cdef void *_export_ibetac_float = _func_ibetac_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibetac_double "ibetac_double"(double, double, double) noexcept nogil +cdef void *_export_ibetac_double = _func_ibetac_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibetac_inv_float "ibetac_inv_float"(float, float, float) noexcept nogil +cdef void *_export_ibetac_inv_float = _func_ibetac_inv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibetac_inv_double "ibetac_inv_double"(double, double, double) noexcept nogil +cdef void *_export_ibetac_inv_double = _func_ibetac_inv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibeta_inv_float "ibeta_inv_float"(float, float, float) noexcept nogil +cdef void *_export_ibeta_inv_float = 
_func_ibeta_inv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibeta_inv_double "ibeta_inv_double"(double, double, double) noexcept nogil +cdef void *_export_ibeta_inv_double = _func_ibeta_inv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_dawsn "faddeeva_dawsn"(double) noexcept nogil +cdef void *_export_faddeeva_dawsn = _func_faddeeva_dawsn +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_dawsn_complex "faddeeva_dawsn_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_dawsn_complex = _func_faddeeva_dawsn_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RC "fellint_RC"(double, double) noexcept nogil +cdef void *_export_fellint_RC = _func_fellint_RC +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RC "cellint_RC"(double complex, double complex) noexcept nogil +cdef void *_export_cellint_RC = _func_cellint_RC +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RD "fellint_RD"(double, double, double) noexcept nogil +cdef void *_export_fellint_RD = _func_fellint_RD +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RD "cellint_RD"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RD = _func_cellint_RD +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RF "fellint_RF"(double, double, double) noexcept nogil +cdef void *_export_fellint_RF = _func_fellint_RF +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RF "cellint_RF"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RF = _func_cellint_RF +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RG "fellint_RG"(double, double, double) noexcept nogil +cdef void *_export_fellint_RG = _func_fellint_RG +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RG "cellint_RG"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RG = _func_cellint_RG +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RJ "fellint_RJ"(double, double, double, double) noexcept nogil +cdef void *_export_fellint_RJ = _func_fellint_RJ +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RJ "cellint_RJ"(double complex, double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RJ = _func_cellint_RJ +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erf "faddeeva_erf"(double complex) noexcept nogil +cdef void *_export_faddeeva_erf = _func_faddeeva_erf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfc_complex "faddeeva_erfc_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfc_complex = _func_faddeeva_erfc_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_erfcx "faddeeva_erfcx"(double) noexcept nogil +cdef void *_export_faddeeva_erfcx = _func_faddeeva_erfcx +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfcx_complex "faddeeva_erfcx_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfcx_complex = _func_faddeeva_erfcx_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_erfi "faddeeva_erfi"(double) noexcept nogil +cdef void *_export_faddeeva_erfi = _func_faddeeva_erfi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex 
_func_faddeeva_erfi_complex "faddeeva_erfi_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfi_complex = _func_faddeeva_erfi_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_erfinv_float "erfinv_float"(float) noexcept nogil +cdef void *_export_erfinv_float = _func_erfinv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_erfinv_double "erfinv_double"(double) noexcept nogil +cdef void *_export_erfinv_double = _func_erfinv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hyp1f1_double "hyp1f1_double"(double, double, double) noexcept nogil +cdef void *_export_hyp1f1_double = _func_hyp1f1_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_log_ndtr "faddeeva_log_ndtr"(double) noexcept nogil +cdef void *_export_faddeeva_log_ndtr = _func_faddeeva_log_ndtr +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_log_ndtr_complex "faddeeva_log_ndtr_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_log_ndtr_complex = _func_faddeeva_log_ndtr_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_ndtr "faddeeva_ndtr"(double complex) noexcept nogil +cdef void *_export_faddeeva_ndtr = _func_faddeeva_ndtr +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_powm1_float "powm1_float"(float, float) noexcept nogil +cdef void *_export_powm1_float = _func_powm1_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_powm1_double "powm1_double"(double, double) noexcept nogil +cdef void *_export_powm1_double = _func_powm1_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_voigt_profile "faddeeva_voigt_profile"(double, double, double) noexcept nogil +cdef void *_export_faddeeva_voigt_profile = _func_faddeeva_voigt_profile +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_w "faddeeva_w"(double complex) noexcept nogil +cdef void *_export_faddeeva_w = _func_faddeeva_w +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_wrightomega "wrightomega"(double complex) noexcept nogil +cdef void *_export_wrightomega = _func_wrightomega +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_wrightomega_real "wrightomega_real"(double) noexcept nogil +cdef void *_export_wrightomega_real = _func_wrightomega_real \ No newline at end of file diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx_defs.h b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..3dce5ff792fcaf1a793b92ddaaf4984acd1c4df7 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx_defs.h @@ -0,0 +1,145 @@ +#ifndef UFUNCS_PROTO_H +#define UFUNCS_PROTO_H 1 +#include "boost_special_functions.h" +npy_float beta_pdf_float(npy_float, npy_float, npy_float); +npy_double beta_pdf_double(npy_double, npy_double, npy_double); +npy_float beta_ppf_float(npy_float, npy_float, npy_float); +npy_double beta_ppf_double(npy_double, npy_double, npy_double); +npy_float binom_cdf_float(npy_float, npy_float, npy_float); +npy_double binom_cdf_double(npy_double, npy_double, npy_double); +npy_float binom_isf_float(npy_float, npy_float, npy_float); +npy_double binom_isf_double(npy_double, npy_double, npy_double); +npy_float binom_pmf_float(npy_float, npy_float, npy_float); 
+npy_double binom_pmf_double(npy_double, npy_double, npy_double); +npy_float binom_ppf_float(npy_float, npy_float, npy_float); +npy_double binom_ppf_double(npy_double, npy_double, npy_double); +npy_float binom_sf_float(npy_float, npy_float, npy_float); +npy_double binom_sf_double(npy_double, npy_double, npy_double); +npy_float hypergeom_cdf_float(npy_float, npy_float, npy_float, npy_float); +npy_double hypergeom_cdf_double(npy_double, npy_double, npy_double, npy_double); +npy_float hypergeom_mean_float(npy_float, npy_float, npy_float); +npy_double hypergeom_mean_double(npy_double, npy_double, npy_double); +npy_float hypergeom_pmf_float(npy_float, npy_float, npy_float, npy_float); +npy_double hypergeom_pmf_double(npy_double, npy_double, npy_double, npy_double); +npy_float hypergeom_sf_float(npy_float, npy_float, npy_float, npy_float); +npy_double hypergeom_sf_double(npy_double, npy_double, npy_double, npy_double); +npy_float hypergeom_skewness_float(npy_float, npy_float, npy_float); +npy_double hypergeom_skewness_double(npy_double, npy_double, npy_double); +npy_float hypergeom_variance_float(npy_float, npy_float, npy_float); +npy_double hypergeom_variance_double(npy_double, npy_double, npy_double); +npy_float invgauss_isf_float(npy_float, npy_float, npy_float); +npy_double invgauss_isf_double(npy_double, npy_double, npy_double); +npy_float invgauss_ppf_float(npy_float, npy_float, npy_float); +npy_double invgauss_ppf_double(npy_double, npy_double, npy_double); +npy_float nbinom_cdf_float(npy_float, npy_float, npy_float); +npy_double nbinom_cdf_double(npy_double, npy_double, npy_double); +npy_float nbinom_isf_float(npy_float, npy_float, npy_float); +npy_double nbinom_isf_double(npy_double, npy_double, npy_double); +npy_float nbinom_kurtosis_excess_float(npy_float, npy_float); +npy_double nbinom_kurtosis_excess_double(npy_double, npy_double); +npy_float nbinom_mean_float(npy_float, npy_float); +npy_double nbinom_mean_double(npy_double, npy_double); +npy_float nbinom_pmf_float(npy_float, npy_float, npy_float); +npy_double nbinom_pmf_double(npy_double, npy_double, npy_double); +npy_float nbinom_ppf_float(npy_float, npy_float, npy_float); +npy_double nbinom_ppf_double(npy_double, npy_double, npy_double); +npy_float nbinom_sf_float(npy_float, npy_float, npy_float); +npy_double nbinom_sf_double(npy_double, npy_double, npy_double); +npy_float nbinom_skewness_float(npy_float, npy_float); +npy_double nbinom_skewness_double(npy_double, npy_double); +npy_float nbinom_variance_float(npy_float, npy_float); +npy_double nbinom_variance_double(npy_double, npy_double); +npy_float ncf_cdf_float(npy_float, npy_float, npy_float, npy_float); +npy_double ncf_cdf_double(npy_double, npy_double, npy_double, npy_double); +npy_float ncf_isf_float(npy_float, npy_float, npy_float, npy_float); +npy_double ncf_isf_double(npy_double, npy_double, npy_double, npy_double); +npy_float ncf_kurtosis_excess_float(npy_float, npy_float, npy_float); +npy_double ncf_kurtosis_excess_double(npy_double, npy_double, npy_double); +npy_float ncf_mean_float(npy_float, npy_float, npy_float); +npy_double ncf_mean_double(npy_double, npy_double, npy_double); +npy_float ncf_pdf_float(npy_float, npy_float, npy_float, npy_float); +npy_double ncf_pdf_double(npy_double, npy_double, npy_double, npy_double); +npy_float ncf_ppf_float(npy_float, npy_float, npy_float, npy_float); +npy_double ncf_ppf_double(npy_double, npy_double, npy_double, npy_double); +npy_float ncf_sf_float(npy_float, npy_float, npy_float, npy_float); +npy_double 
ncf_sf_double(npy_double, npy_double, npy_double, npy_double); +npy_float ncf_skewness_float(npy_float, npy_float, npy_float); +npy_double ncf_skewness_double(npy_double, npy_double, npy_double); +npy_float ncf_variance_float(npy_float, npy_float, npy_float); +npy_double ncf_variance_double(npy_double, npy_double, npy_double); +npy_float nct_cdf_float(npy_float, npy_float, npy_float); +npy_double nct_cdf_double(npy_double, npy_double, npy_double); +npy_float nct_isf_float(npy_float, npy_float, npy_float); +npy_double nct_isf_double(npy_double, npy_double, npy_double); +npy_float nct_kurtosis_excess_float(npy_float, npy_float); +npy_double nct_kurtosis_excess_double(npy_double, npy_double); +npy_float nct_mean_float(npy_float, npy_float); +npy_double nct_mean_double(npy_double, npy_double); +npy_float nct_ppf_float(npy_float, npy_float, npy_float); +npy_double nct_ppf_double(npy_double, npy_double, npy_double); +npy_float nct_sf_float(npy_float, npy_float, npy_float); +npy_double nct_sf_double(npy_double, npy_double, npy_double); +npy_float nct_skewness_float(npy_float, npy_float); +npy_double nct_skewness_double(npy_double, npy_double); +npy_float nct_variance_float(npy_float, npy_float); +npy_double nct_variance_double(npy_double, npy_double); +npy_float ncx2_cdf_float(npy_float, npy_float, npy_float); +npy_double ncx2_cdf_double(npy_double, npy_double, npy_double); +npy_float ncx2_isf_float(npy_float, npy_float, npy_float); +npy_double ncx2_isf_double(npy_double, npy_double, npy_double); +npy_float ncx2_pdf_float(npy_float, npy_float, npy_float); +npy_double ncx2_pdf_double(npy_double, npy_double, npy_double); +npy_float ncx2_ppf_float(npy_float, npy_float, npy_float); +npy_double ncx2_ppf_double(npy_double, npy_double, npy_double); +npy_float ncx2_sf_float(npy_float, npy_float, npy_float); +npy_double ncx2_sf_double(npy_double, npy_double, npy_double); +npy_float skewnorm_cdf_float(npy_float, npy_float, npy_float, npy_float); +npy_double skewnorm_cdf_double(npy_double, npy_double, npy_double, npy_double); +npy_float skewnorm_isf_float(npy_float, npy_float, npy_float, npy_float); +npy_double skewnorm_isf_double(npy_double, npy_double, npy_double, npy_double); +npy_float skewnorm_ppf_float(npy_float, npy_float, npy_float, npy_float); +npy_double skewnorm_ppf_double(npy_double, npy_double, npy_double, npy_double); +#include "stirling2.h" +npy_double _stirling2_inexact(npy_double, npy_double); +npy_float ibeta_float(npy_float, npy_float, npy_float); +npy_double ibeta_double(npy_double, npy_double, npy_double); +npy_float ibetac_float(npy_float, npy_float, npy_float); +npy_double ibetac_double(npy_double, npy_double, npy_double); +npy_float ibetac_inv_float(npy_float, npy_float, npy_float); +npy_double ibetac_inv_double(npy_double, npy_double, npy_double); +npy_float ibeta_inv_float(npy_float, npy_float, npy_float); +npy_double ibeta_inv_double(npy_double, npy_double, npy_double); +#include "_faddeeva.h" +npy_double faddeeva_dawsn(npy_double); +npy_cdouble faddeeva_dawsn_complex(npy_cdouble); +#include "ellint_carlson_wrap.hh" +npy_double fellint_RC(npy_double, npy_double); +npy_cdouble cellint_RC(npy_cdouble, npy_cdouble); +npy_double fellint_RD(npy_double, npy_double, npy_double); +npy_cdouble cellint_RD(npy_cdouble, npy_cdouble, npy_cdouble); +npy_double fellint_RF(npy_double, npy_double, npy_double); +npy_cdouble cellint_RF(npy_cdouble, npy_cdouble, npy_cdouble); +npy_double fellint_RG(npy_double, npy_double, npy_double); +npy_cdouble cellint_RG(npy_cdouble, npy_cdouble, npy_cdouble); 
+npy_double fellint_RJ(npy_double, npy_double, npy_double, npy_double); +npy_cdouble cellint_RJ(npy_cdouble, npy_cdouble, npy_cdouble, npy_cdouble); +npy_cdouble faddeeva_erf(npy_cdouble); +npy_cdouble faddeeva_erfc_complex(npy_cdouble); +npy_double faddeeva_erfcx(npy_double); +npy_cdouble faddeeva_erfcx_complex(npy_cdouble); +npy_double faddeeva_erfi(npy_double); +npy_cdouble faddeeva_erfi_complex(npy_cdouble); +npy_float erfinv_float(npy_float); +npy_double erfinv_double(npy_double); +npy_double hyp1f1_double(npy_double, npy_double, npy_double); +npy_double faddeeva_log_ndtr(npy_double); +npy_cdouble faddeeva_log_ndtr_complex(npy_cdouble); +npy_cdouble faddeeva_ndtr(npy_cdouble); +npy_float powm1_float(npy_float, npy_float); +npy_double powm1_double(npy_double, npy_double); +npy_double faddeeva_voigt_profile(npy_double, npy_double, npy_double); +npy_cdouble faddeeva_w(npy_cdouble); +#include "_wright.h" +npy_cdouble wrightomega(npy_cdouble); +npy_double wrightomega_real(npy_double); +#endif diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/add_newdocs.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..5549717d35710d71655e42c836625cde9346bcc3 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/add_newdocs.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="add_newdocs", + private_modules=["_add_newdocs"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/basic.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..e55695f44d05187d6c83f1ebefd70270af2c2d76 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/basic.py @@ -0,0 +1,87 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'ai_zeros', + 'assoc_laguerre', + 'bei_zeros', + 'beip_zeros', + 'ber_zeros', + 'bernoulli', + 'berp_zeros', + 'bi_zeros', + 'clpmn', + 'comb', + 'digamma', + 'diric', + 'erf_zeros', + 'euler', + 'factorial', + 'factorial2', + 'factorialk', + 'fresnel_zeros', + 'fresnelc_zeros', + 'fresnels_zeros', + 'gamma', + 'h1vp', + 'h2vp', + 'hankel1', + 'hankel2', + 'iv', + 'ivp', + 'jn_zeros', + 'jnjnp_zeros', + 'jnp_zeros', + 'jnyn_zeros', + 'jv', + 'jvp', + 'kei_zeros', + 'keip_zeros', + 'kelvin_zeros', + 'ker_zeros', + 'kerp_zeros', + 'kv', + 'kvp', + 'lmbda', + 'lpmn', + 'lpn', + 'lqmn', + 'lqn', + 'mathieu_a', + 'mathieu_b', + 'mathieu_even_coef', + 'mathieu_odd_coef', + 'obl_cv_seq', + 'pbdn_seq', + 'pbdv_seq', + 'pbvv_seq', + 'perm', + 'polygamma', + 'pro_cv_seq', + 'psi', + 'riccati_jn', + 'riccati_yn', + 'sinc', + 'y0_zeros', + 'y1_zeros', + 'y1p_zeros', + 'yn_zeros', + 'ynp_zeros', + 'yv', + 'yvp', + 'zeta' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="basic", + private_modules=["_basic", "_ufuncs"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/cython_special.pxd b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/cython_special.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a472206d75fd090c3ee4fede5a53decb58020e42 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/cython_special.pxd @@ -0,0 +1,261 @@ + +ctypedef fused number_t: + double complex + double + +cpdef number_t spherical_jn(Py_ssize_t n, number_t z, bint derivative=*) noexcept nogil +cpdef number_t spherical_yn(Py_ssize_t n, number_t z, bint derivative=*) noexcept nogil +cpdef number_t spherical_in(Py_ssize_t n, number_t z, bint derivative=*) noexcept nogil +cpdef number_t spherical_kn(Py_ssize_t n, number_t z, bint derivative=*) noexcept nogil + +ctypedef fused Dd_number_t: + double complex + double + +ctypedef fused df_number_t: + double + float + +ctypedef fused dfg_number_t: + double + float + long double + +ctypedef fused dlp_number_t: + double + long + Py_ssize_t + +cpdef double voigt_profile(double x0, double x1, double x2) noexcept nogil +cpdef double agm(double x0, double x1) noexcept nogil +cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil +cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil +cpdef double bdtr(double x0, dlp_number_t x1, double x2) noexcept nogil +cpdef double bdtrc(double x0, dlp_number_t x1, double x2) noexcept nogil +cpdef double bdtri(double x0, dlp_number_t x1, double x2) noexcept nogil +cpdef double bdtrik(double x0, double x1, double x2) noexcept nogil +cpdef double bdtrin(double x0, double x1, double x2) noexcept nogil +cpdef double bei(double x0) noexcept nogil +cpdef double beip(double x0) noexcept nogil +cpdef double ber(double x0) noexcept nogil +cpdef double berp(double x0) noexcept nogil +cpdef double besselpoly(double x0, double x1, double x2) noexcept nogil +cpdef double beta(double x0, double x1) noexcept nogil +cpdef df_number_t betainc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef df_number_t betaincc(df_number_t x0, df_number_t x1, df_number_t 
x2) noexcept nogil +cpdef df_number_t betaincinv(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef df_number_t betainccinv(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef double betaln(double x0, double x1) noexcept nogil +cpdef double binom(double x0, double x1) noexcept nogil +cpdef double boxcox(double x0, double x1) noexcept nogil +cpdef double boxcox1p(double x0, double x1) noexcept nogil +cpdef double btdtr(double x0, double x1, double x2) noexcept nogil +cpdef double btdtri(double x0, double x1, double x2) noexcept nogil +cpdef double btdtria(double x0, double x1, double x2) noexcept nogil +cpdef double btdtrib(double x0, double x1, double x2) noexcept nogil +cpdef double cbrt(double x0) noexcept nogil +cpdef double chdtr(double x0, double x1) noexcept nogil +cpdef double chdtrc(double x0, double x1) noexcept nogil +cpdef double chdtri(double x0, double x1) noexcept nogil +cpdef double chdtriv(double x0, double x1) noexcept nogil +cpdef double chndtr(double x0, double x1, double x2) noexcept nogil +cpdef double chndtridf(double x0, double x1, double x2) noexcept nogil +cpdef double chndtrinc(double x0, double x1, double x2) noexcept nogil +cpdef double chndtrix(double x0, double x1, double x2) noexcept nogil +cpdef double cosdg(double x0) noexcept nogil +cpdef double cosm1(double x0) noexcept nogil +cpdef double cotdg(double x0) noexcept nogil +cpdef Dd_number_t dawsn(Dd_number_t x0) noexcept nogil +cpdef double ellipe(double x0) noexcept nogil +cpdef double ellipeinc(double x0, double x1) noexcept nogil +cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) noexcept nogil +cpdef double ellipkinc(double x0, double x1) noexcept nogil +cpdef double ellipkm1(double x0) noexcept nogil +cpdef double ellipk(double x0) noexcept nogil +cpdef Dd_number_t elliprc(Dd_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t elliprd(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t elliprf(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t elliprg(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t elliprj(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2, Dd_number_t x3) noexcept nogil +cpdef double entr(double x0) noexcept nogil +cpdef Dd_number_t erf(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t erfc(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t erfcx(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t erfi(Dd_number_t x0) noexcept nogil +cpdef df_number_t erfinv(df_number_t x0) noexcept nogil +cpdef double erfcinv(double x0) noexcept nogil +cpdef Dd_number_t eval_chebyc(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_chebys(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_chebyt(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_chebyu(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_gegenbauer(dlp_number_t x0, double x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t eval_genlaguerre(dlp_number_t x0, double x1, Dd_number_t x2) noexcept nogil +cpdef double eval_hermite(Py_ssize_t x0, double x1) noexcept nogil +cpdef double eval_hermitenorm(Py_ssize_t x0, double x1) noexcept nogil +cpdef Dd_number_t eval_jacobi(dlp_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil +cpdef Dd_number_t eval_laguerre(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_legendre(dlp_number_t x0, Dd_number_t x1) 
noexcept nogil +cpdef Dd_number_t eval_sh_chebyt(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_sh_chebyu(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_sh_jacobi(dlp_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil +cpdef Dd_number_t eval_sh_legendre(dlp_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t exp1(Dd_number_t x0) noexcept nogil +cpdef double exp10(double x0) noexcept nogil +cpdef double exp2(double x0) noexcept nogil +cpdef Dd_number_t expi(Dd_number_t x0) noexcept nogil +cpdef dfg_number_t expit(dfg_number_t x0) noexcept nogil +cpdef Dd_number_t expm1(Dd_number_t x0) noexcept nogil +cpdef double expn(dlp_number_t x0, double x1) noexcept nogil +cpdef double exprel(double x0) noexcept nogil +cpdef double fdtr(double x0, double x1, double x2) noexcept nogil +cpdef double fdtrc(double x0, double x1, double x2) noexcept nogil +cpdef double fdtri(double x0, double x1, double x2) noexcept nogil +cpdef double fdtridfd(double x0, double x1, double x2) noexcept nogil +cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil +cpdef Dd_number_t gamma(Dd_number_t x0) noexcept nogil +cpdef double gammainc(double x0, double x1) noexcept nogil +cpdef double gammaincc(double x0, double x1) noexcept nogil +cpdef double gammainccinv(double x0, double x1) noexcept nogil +cpdef double gammaincinv(double x0, double x1) noexcept nogil +cpdef double gammaln(double x0) noexcept nogil +cpdef double gammasgn(double x0) noexcept nogil +cpdef double gdtr(double x0, double x1, double x2) noexcept nogil +cpdef double gdtrc(double x0, double x1, double x2) noexcept nogil +cpdef double gdtria(double x0, double x1, double x2) noexcept nogil +cpdef double gdtrib(double x0, double x1, double x2) noexcept nogil +cpdef double gdtrix(double x0, double x1, double x2) noexcept nogil +cpdef double complex hankel1(double x0, double complex x1) noexcept nogil +cpdef double complex hankel1e(double x0, double complex x1) noexcept nogil +cpdef double complex hankel2(double x0, double complex x1) noexcept nogil +cpdef double complex hankel2e(double x0, double complex x1) noexcept nogil +cpdef double huber(double x0, double x1) noexcept nogil +cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) noexcept nogil +cpdef double hyperu(double x0, double x1, double x2) noexcept nogil +cpdef double i0(double x0) noexcept nogil +cpdef double i0e(double x0) noexcept nogil +cpdef double i1(double x0) noexcept nogil +cpdef double i1e(double x0) noexcept nogil +cpdef double inv_boxcox(double x0, double x1) noexcept nogil +cpdef double inv_boxcox1p(double x0, double x1) noexcept nogil +cdef void it2i0k0(double x0, double *y0, double *y1) noexcept nogil +cdef void it2j0y0(double x0, double *y0, double *y1) noexcept nogil +cpdef double it2struve0(double x0) noexcept nogil +cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) noexcept nogil +cdef void iti0k0(double x0, double *y0, double *y1) noexcept nogil +cdef void itj0y0(double x0, double *y0, double *y1) noexcept nogil +cpdef double itmodstruve0(double x0) noexcept nogil +cpdef double itstruve0(double x0) noexcept nogil +cpdef Dd_number_t iv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t ive(double x0, Dd_number_t x1) noexcept nogil +cpdef double j0(double x0) noexcept nogil 
+cpdef double j1(double x0) noexcept nogil +cpdef Dd_number_t jv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t jve(double x0, Dd_number_t x1) noexcept nogil +cpdef double k0(double x0) noexcept nogil +cpdef double k0e(double x0) noexcept nogil +cpdef double k1(double x0) noexcept nogil +cpdef double k1e(double x0) noexcept nogil +cpdef double kei(double x0) noexcept nogil +cpdef double keip(double x0) noexcept nogil +cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) noexcept nogil +cpdef double ker(double x0) noexcept nogil +cpdef double kerp(double x0) noexcept nogil +cpdef double kl_div(double x0, double x1) noexcept nogil +cpdef double kn(dlp_number_t x0, double x1) noexcept nogil +cpdef double kolmogi(double x0) noexcept nogil +cpdef double kolmogorov(double x0) noexcept nogil +cpdef Dd_number_t kv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t kve(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t log1p(Dd_number_t x0) noexcept nogil +cpdef dfg_number_t log_expit(dfg_number_t x0) noexcept nogil +cpdef Dd_number_t log_ndtr(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t loggamma(Dd_number_t x0) noexcept nogil +cpdef dfg_number_t logit(dfg_number_t x0) noexcept nogil +cpdef double lpmv(double x0, double x1, double x2) noexcept nogil +cpdef double mathieu_a(double x0, double x1) noexcept nogil +cpdef double mathieu_b(double x0, double x1) noexcept nogil +cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void modfresnelm(double x0, double complex *y0, double complex *y1) noexcept nogil +cdef void modfresnelp(double x0, double complex *y0, double complex *y1) noexcept nogil +cpdef double modstruve(double x0, double x1) noexcept nogil +cpdef double nbdtr(dlp_number_t x0, dlp_number_t x1, double x2) noexcept nogil +cpdef double nbdtrc(dlp_number_t x0, dlp_number_t x1, double x2) noexcept nogil +cpdef double nbdtri(dlp_number_t x0, dlp_number_t x1, double x2) noexcept nogil +cpdef double nbdtrik(double x0, double x1, double x2) noexcept nogil +cpdef double nbdtrin(double x0, double x1, double x2) noexcept nogil +cpdef double ncfdtr(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtri(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double nctdtr(double x0, double x1, double x2) noexcept nogil +cpdef double nctdtridf(double x0, double x1, double x2) noexcept nogil +cpdef double nctdtrinc(double x0, double x1, double x2) noexcept nogil +cpdef double nctdtrit(double x0, double x1, double x2) noexcept nogil +cpdef Dd_number_t ndtr(Dd_number_t x0) noexcept nogil +cpdef double ndtri(double x0) noexcept nogil +cpdef double nrdtrimn(double x0, double x1, double x2) noexcept nogil 
+cpdef double nrdtrisd(double x0, double x1, double x2) noexcept nogil +cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double obl_cv(double x0, double x1, double x2) noexcept nogil +cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double owens_t(double x0, double x1) noexcept nogil +cdef void pbdv(double x0, double x1, double *y0, double *y1) noexcept nogil +cdef void pbvv(double x0, double x1, double *y0, double *y1) noexcept nogil +cdef void pbwa(double x0, double x1, double *y0, double *y1) noexcept nogil +cpdef double pdtr(double x0, double x1) noexcept nogil +cpdef double pdtrc(double x0, double x1) noexcept nogil +cpdef double pdtri(dlp_number_t x0, double x1) noexcept nogil +cpdef double pdtrik(double x0, double x1) noexcept nogil +cpdef double poch(double x0, double x1) noexcept nogil +cpdef df_number_t powm1(df_number_t x0, df_number_t x1) noexcept nogil +cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double pro_cv(double x0, double x1, double x2) noexcept nogil +cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double pseudo_huber(double x0, double x1) noexcept nogil +cpdef Dd_number_t psi(Dd_number_t x0) noexcept nogil +cpdef double radian(double x0, double x1, double x2) noexcept nogil +cpdef double rel_entr(double x0, double x1) noexcept nogil +cpdef Dd_number_t rgamma(Dd_number_t x0) noexcept nogil +cpdef double round(double x0) noexcept nogil +cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil +cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil +cpdef double sindg(double x0) noexcept nogil +cpdef double smirnov(dlp_number_t x0, double x1) noexcept nogil +cpdef double smirnovi(dlp_number_t x0, double x1) noexcept nogil +cpdef Dd_number_t spence(Dd_number_t x0) noexcept nogil +cpdef double complex sph_harm(dlp_number_t x0, dlp_number_t x1, double x2, double x3) noexcept nogil +cpdef double stdtr(double x0, double x1) noexcept nogil +cpdef double stdtridf(double x0, double x1) noexcept nogil +cpdef double stdtrit(double x0, double x1) noexcept nogil +cpdef double struve(double x0, double x1) noexcept nogil +cpdef double tandg(double x0) noexcept nogil +cpdef double tklmbda(double x0, double x1) noexcept nogil +cpdef double complex wofz(double complex x0) noexcept nogil +cpdef Dd_number_t wrightomega(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) 
noexcept nogil +cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) noexcept nogil +cpdef double y0(double x0) noexcept nogil +cpdef double y1(double x0) noexcept nogil +cpdef double yn(dlp_number_t x0, double x1) noexcept nogil +cpdef Dd_number_t yv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t yve(double x0, Dd_number_t x1) noexcept nogil +cpdef double zetac(double x0) noexcept nogil +cpdef double wright_bessel(double x0, double x1, double x2) noexcept nogil +cpdef double log_wright_bessel(double x0, double x1, double x2) noexcept nogil +cpdef double ndtri_exp(double x0) noexcept nogil \ No newline at end of file diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyi b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyi new file mode 100644 index 0000000000000000000000000000000000000000..024e962b10df8892631eaad20223f7fc8378ea83 --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name) -> Any: ... diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/libsf_error_state.so b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/libsf_error_state.so new file mode 100644 index 0000000000000000000000000000000000000000..4056cb019ba6f23334be96ba6851e50c06e89ef6 Binary files /dev/null and b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/libsf_error_state.so differ diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/orthogonal.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/orthogonal.py new file mode 100644 index 0000000000000000000000000000000000000000..0b13a08a96cb683d72a4a00d6962446e1779c88a --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/orthogonal.py @@ -0,0 +1,45 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys', + 'jacobi', 'laguerre', 'genlaguerre', 'hermite', + 'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt', + 'sh_chebyu', 'sh_jacobi'] + +# Correspondence between new and old names of root functions +_rootfuns_map = {'roots_legendre': 'p_roots', + 'roots_chebyt': 't_roots', + 'roots_chebyu': 'u_roots', + 'roots_chebyc': 'c_roots', + 'roots_chebys': 's_roots', + 'roots_jacobi': 'j_roots', + 'roots_laguerre': 'l_roots', + 'roots_genlaguerre': 'la_roots', + 'roots_hermite': 'h_roots', + 'roots_hermitenorm': 'he_roots', + 'roots_gegenbauer': 'cg_roots', + 'roots_sh_legendre': 'ps_roots', + 'roots_sh_chebyt': 'ts_roots', + 'roots_sh_chebyu': 'us_roots', + 'roots_sh_jacobi': 'js_roots'} + + +__all__ = _polyfuns + list(_rootfuns_map.keys()) + [ # noqa: F822 + 'airy', 'p_roots', 't_roots', 'u_roots', 'c_roots', 's_roots', + 'j_roots', 'l_roots', 'la_roots', 'h_roots', 'he_roots', 'cg_roots', + 'ps_roots', 'ts_roots', 'us_roots', 'js_roots' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="orthogonal", + private_modules=["_orthogonal"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/specfun.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/specfun.py new file mode 100644 index 0000000000000000000000000000000000000000..9fca00415a6406b8cdf41a42b6fbf991cea1f53f --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/specfun.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +# ruff: noqa: F822 +__all__ = [ + 'clpmn', + 'lpmn', + 'lpn', + 'lqmn', + 'pbdv' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="specfun", + private_modules=["_basic", "_specfun"], all=__all__, + attribute=name) diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/spfun_stats.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/spfun_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..a1e58487aaa547483c9f2531ac4efc2ad5e4795c --- /dev/null +++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/spfun_stats.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. 
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+__all__ = ['multigammaln']  # noqa: F822
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    return _sub_module_deprecation(sub_package="special", module="spfun_stats",
+                                   private_modules=["_spfun_stats"], all=__all__,
+                                   attribute=name)
diff --git a/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8c42aa688081fb58f79ad2c8ea932d03b33523b
--- /dev/null
+++ b/emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py
@@ -0,0 +1,24 @@
+"""Tests for parabolic cylinder functions.
+
+"""
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+import scipy.special as sc
+
+
+def test_pbwa_segfault():
+    # Regression test for https://github.com/scipy/scipy/issues/6208.
+    #
+    # Data generated by mpmath.
+    #
+    w = 1.02276567211316867161
+    wp = -0.48887053372346189882
+    assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0)
+
+
+def test_pbwa_nan():
+    # Check that NaNs are returned outside of the range in which the
+    # implementation is accurate.
+    pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)]
+    for p in pts:
+        assert_equal(sc.pbwa(*p), (np.nan, np.nan))
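Note on the `_ufuncs_cxx.pyx` hunk at the top of this section: every C++ kernel is brought in through a `cdef extern from` block that aliases the real C symbol (the quoted string) to a typed Cython name, and the function's address is then stored in a module-level `void *` so the generated ufunc tables can look each loop function up by its `_export_` name. A stripped-down sketch of the same idiom, with a hypothetical header `my_kernels.h` and function `my_kernel` standing in for the generated names:

# Hypothetical illustration of the declare-then-export idiom used in
# _ufuncs_cxx.pyx; my_kernels.h and my_kernel are placeholders.
cdef extern from "my_kernels.h":
    # The quoted string is the actual C symbol; the Cython-side alias
    # carries the typed, `noexcept nogil` signature.
    cdef double _func_my_kernel "my_kernel"(double) noexcept nogil

# Publish the function's address as a module-level void pointer; the
# generated ufunc machinery retrieves it by name when wiring up loops.
cdef void *_export_my_kernel = _func_my_kernel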
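The `cython_special.pxd` hunk declares the Cython-level API of `scipy.special`: each `cpdef ... noexcept nogil` signature can be cimported and called as a plain C function, with the fused types (`Dd_number_t`, `df_number_t`, `dlp_number_t`, ...) resolving to a concrete overload at compile time. A minimal usage sketch, assuming a hypothetical `bessel_demo.pyx` compiled with Cython against this install:

# cython: language_level=3
# Hypothetical bessel_demo.pyx: sum J0 over an array without the GIL.
from scipy.special.cython_special cimport j0

def sum_j0(double[::1] xs):
    cdef Py_ssize_t i
    cdef double total = 0.0
    # j0 is declared `noexcept nogil` in cython_special.pxd, so the
    # whole loop can run with the GIL released.
    with nogil:
        for i in range(xs.shape[0]):
            total += j0(xs[i])
    return total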
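Finally, `add_newdocs.py`, `basic.py`, `orthogonal.py`, `specfun.py`, and `spfun_stats.py` above are all instances of one deprecation-shim pattern: a module-level `__getattr__` (PEP 562) hands the attribute lookup to `scipy._lib.deprecation._sub_module_deprecation`, which emits a `DeprecationWarning` and forwards to the private implementation module named in `private_modules`. A small sketch of what that means for user code (the exact warning text is SciPy-internal and not reproduced here):

import warnings

# Old-style access through the deprecated submodule still resolves,
# but each lookup goes through the shim's __getattr__ and warns.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from scipy.special.spfun_stats import multigammaln
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# Preferred spelling: import from the public namespace instead.
from scipy.special import multigammaln
print(multigammaln(3.0, d=2))  # log of the 2-variate gamma function at a=3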