Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE +29 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py +116 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py +704 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py +22 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py +46 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py +1 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py +554 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_fft.py +183 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py +515 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py +161 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py +23 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py +16 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py +81 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py +46 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/fft.py +36 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__init__.py +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py +8 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/_aliases.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/linalg.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py +146 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py +24 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc +0 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py +81 -0
- emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py +46 -0
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc
ADDED
Binary file (3.08 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc
ADDED
Binary file (7.82 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc
ADDED
Binary file (825 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
BSD 3-Clause License
|
2 |
+
|
3 |
+
Copyright (c) 2018, Quansight-Labs
|
4 |
+
All rights reserved.
|
5 |
+
|
6 |
+
Redistribution and use in source and binary forms, with or without
|
7 |
+
modification, are permitted provided that the following conditions are met:
|
8 |
+
|
9 |
+
* Redistributions of source code must retain the above copyright notice, this
|
10 |
+
list of conditions and the following disclaimer.
|
11 |
+
|
12 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
13 |
+
this list of conditions and the following disclaimer in the documentation
|
14 |
+
and/or other materials provided with the distribution.
|
15 |
+
|
16 |
+
* Neither the name of the copyright holder nor the names of its
|
17 |
+
contributors may be used to endorse or promote products derived from
|
18 |
+
this software without specific prior written permission.
|
19 |
+
|
20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
.. note:
|
3 |
+
If you are looking for overrides for NumPy-specific methods, see the
|
4 |
+
documentation for :obj:`unumpy`. This page explains how to write
|
5 |
+
back-ends and multimethods.
|
6 |
+
|
7 |
+
``uarray`` is built around a back-end protocol, and overridable multimethods.
|
8 |
+
It is necessary to define multimethods for back-ends to be able to override them.
|
9 |
+
See the documentation of :obj:`generate_multimethod` on how to write multimethods.
|
10 |
+
|
11 |
+
|
12 |
+
|
13 |
+
Let's start with the simplest:
|
14 |
+
|
15 |
+
``__ua_domain__`` defines the back-end *domain*. The domain consists of period-
|
16 |
+
separated string consisting of the modules you extend plus the submodule. For
|
17 |
+
example, if a submodule ``module2.submodule`` extends ``module1``
|
18 |
+
(i.e., it exposes dispatchables marked as types available in ``module1``),
|
19 |
+
then the domain string should be ``"module1.module2.submodule"``.
|
20 |
+
|
21 |
+
|
22 |
+
For the purpose of this demonstration, we'll be creating an object and setting
|
23 |
+
its attributes directly. However, note that you can use a module or your own type
|
24 |
+
as a backend as well.
|
25 |
+
|
26 |
+
>>> class Backend: pass
|
27 |
+
>>> be = Backend()
|
28 |
+
>>> be.__ua_domain__ = "ua_examples"
|
29 |
+
|
30 |
+
It might be useful at this point to sidetrack to the documentation of
|
31 |
+
:obj:`generate_multimethod` to find out how to generate a multimethod
|
32 |
+
overridable by :obj:`uarray`. Needless to say, writing a backend and
|
33 |
+
creating multimethods are mostly orthogonal activities, and knowing
|
34 |
+
one doesn't necessarily require knowledge of the other, although it
|
35 |
+
is certainly helpful. We expect core API designers/specifiers to write the
|
36 |
+
multimethods, and implementors to override them. But, as is often the case,
|
37 |
+
similar people write both.
|
38 |
+
|
39 |
+
Without further ado, here's an example multimethod:
|
40 |
+
|
41 |
+
>>> import uarray as ua
|
42 |
+
>>> from uarray import Dispatchable
|
43 |
+
>>> def override_me(a, b):
|
44 |
+
... return Dispatchable(a, int),
|
45 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
46 |
+
... return (dispatchables[0], args[1]), {}
|
47 |
+
>>> overridden_me = ua.generate_multimethod(
|
48 |
+
... override_me, override_replacer, "ua_examples"
|
49 |
+
... )
|
50 |
+
|
51 |
+
Next comes the part about overriding the multimethod. This requires
|
52 |
+
the ``__ua_function__`` protocol, and the ``__ua_convert__``
|
53 |
+
protocol. The ``__ua_function__`` protocol has the signature
|
54 |
+
``(method, args, kwargs)`` where ``method`` is the passed
|
55 |
+
multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables``
|
56 |
+
is the list of converted dispatchables passed in.
|
57 |
+
|
58 |
+
>>> def __ua_function__(method, args, kwargs):
|
59 |
+
... return method.__name__, args, kwargs
|
60 |
+
>>> be.__ua_function__ = __ua_function__
|
61 |
+
|
62 |
+
The other protocol of interest is the ``__ua_convert__`` protocol. It has the
|
63 |
+
signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
|
64 |
+
between the formats should ideally be an ``O(1)`` operation, but it means that
|
65 |
+
no memory copying should be involved, only views of the existing data.
|
66 |
+
|
67 |
+
>>> def __ua_convert__(dispatchables, coerce):
|
68 |
+
... for d in dispatchables:
|
69 |
+
... if d.type is int:
|
70 |
+
... if coerce and d.coercible:
|
71 |
+
... yield str(d.value)
|
72 |
+
... else:
|
73 |
+
... yield d.value
|
74 |
+
>>> be.__ua_convert__ = __ua_convert__
|
75 |
+
|
76 |
+
Now that we have defined the backend, the next thing to do is to call the multimethod.
|
77 |
+
|
78 |
+
>>> with ua.set_backend(be):
|
79 |
+
... overridden_me(1, "2")
|
80 |
+
('override_me', (1, '2'), {})
|
81 |
+
|
82 |
+
Note that the marked type has no effect on the actual type of the passed object.
|
83 |
+
We can also coerce the type of the input.
|
84 |
+
|
85 |
+
>>> with ua.set_backend(be, coerce=True):
|
86 |
+
... overridden_me(1, "2")
|
87 |
+
... overridden_me(1.0, "2")
|
88 |
+
('override_me', ('1', '2'), {})
|
89 |
+
('override_me', ('1.0', '2'), {})
|
90 |
+
|
91 |
+
Another feature is that if you remove ``__ua_convert__``, the arguments are not
|
92 |
+
converted at all and it's up to the backend to handle that.
|
93 |
+
|
94 |
+
>>> del be.__ua_convert__
|
95 |
+
>>> with ua.set_backend(be):
|
96 |
+
... overridden_me(1, "2")
|
97 |
+
('override_me', (1, '2'), {})
|
98 |
+
|
99 |
+
You also have the option to return ``NotImplemented``, in which case processing moves on
|
100 |
+
to the next back-end, which in this case, doesn't exist. The same applies to
|
101 |
+
``__ua_convert__``.
|
102 |
+
|
103 |
+
>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
|
104 |
+
>>> with ua.set_backend(be):
|
105 |
+
... overridden_me(1, "2")
|
106 |
+
Traceback (most recent call last):
|
107 |
+
...
|
108 |
+
uarray.BackendNotImplementedError: ...
|
109 |
+
|
110 |
+
The last possibility is if we don't have ``__ua_convert__``, in which case the job is
|
111 |
+
left up to ``__ua_function__``, but putting things back into arrays after conversion
|
112 |
+
will not be possible.
|
113 |
+
"""
|
114 |
+
|
115 |
+
from ._backend import *
|
116 |
+
__version__ = '0.8.8.dev0+aa94c5a4.scipy'
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.73 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc
ADDED
Binary file (20.4 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py
ADDED
@@ -0,0 +1,704 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import typing
|
2 |
+
import types
|
3 |
+
import inspect
|
4 |
+
import functools
|
5 |
+
from . import _uarray
|
6 |
+
import copyreg
|
7 |
+
import pickle
|
8 |
+
import contextlib
|
9 |
+
|
10 |
+
from ._uarray import ( # type: ignore
|
11 |
+
BackendNotImplementedError,
|
12 |
+
_Function,
|
13 |
+
_SkipBackendContext,
|
14 |
+
_SetBackendContext,
|
15 |
+
_BackendState,
|
16 |
+
)
|
17 |
+
|
18 |
+
__all__ = [
|
19 |
+
"set_backend",
|
20 |
+
"set_global_backend",
|
21 |
+
"skip_backend",
|
22 |
+
"register_backend",
|
23 |
+
"determine_backend",
|
24 |
+
"determine_backend_multi",
|
25 |
+
"clear_backends",
|
26 |
+
"create_multimethod",
|
27 |
+
"generate_multimethod",
|
28 |
+
"_Function",
|
29 |
+
"BackendNotImplementedError",
|
30 |
+
"Dispatchable",
|
31 |
+
"wrap_single_convertor",
|
32 |
+
"wrap_single_convertor_instance",
|
33 |
+
"all_of_type",
|
34 |
+
"mark_as",
|
35 |
+
"set_state",
|
36 |
+
"get_state",
|
37 |
+
"reset_state",
|
38 |
+
"_BackendState",
|
39 |
+
"_SkipBackendContext",
|
40 |
+
"_SetBackendContext",
|
41 |
+
]
|
42 |
+
|
43 |
+
ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]]
|
44 |
+
ArgumentReplacerType = typing.Callable[
|
45 |
+
[tuple, dict, tuple], tuple[tuple, dict]
|
46 |
+
]
|
47 |
+
|
48 |
+
def unpickle_function(mod_name, qname, self_):
|
49 |
+
import importlib
|
50 |
+
|
51 |
+
try:
|
52 |
+
module = importlib.import_module(mod_name)
|
53 |
+
qname = qname.split(".")
|
54 |
+
func = module
|
55 |
+
for q in qname:
|
56 |
+
func = getattr(func, q)
|
57 |
+
|
58 |
+
if self_ is not None:
|
59 |
+
func = types.MethodType(func, self_)
|
60 |
+
|
61 |
+
return func
|
62 |
+
except (ImportError, AttributeError) as e:
|
63 |
+
from pickle import UnpicklingError
|
64 |
+
|
65 |
+
raise UnpicklingError from e
|
66 |
+
|
67 |
+
|
68 |
+
def pickle_function(func):
|
69 |
+
mod_name = getattr(func, "__module__", None)
|
70 |
+
qname = getattr(func, "__qualname__", None)
|
71 |
+
self_ = getattr(func, "__self__", None)
|
72 |
+
|
73 |
+
try:
|
74 |
+
test = unpickle_function(mod_name, qname, self_)
|
75 |
+
except pickle.UnpicklingError:
|
76 |
+
test = None
|
77 |
+
|
78 |
+
if test is not func:
|
79 |
+
raise pickle.PicklingError(
|
80 |
+
f"Can't pickle {func}: it's not the same object as {test}"
|
81 |
+
)
|
82 |
+
|
83 |
+
return unpickle_function, (mod_name, qname, self_)
|
84 |
+
|
85 |
+
|
86 |
+
def pickle_state(state):
|
87 |
+
return _uarray._BackendState._unpickle, state._pickle()
|
88 |
+
|
89 |
+
|
90 |
+
def pickle_set_backend_context(ctx):
|
91 |
+
return _SetBackendContext, ctx._pickle()
|
92 |
+
|
93 |
+
|
94 |
+
def pickle_skip_backend_context(ctx):
|
95 |
+
return _SkipBackendContext, ctx._pickle()
|
96 |
+
|
97 |
+
|
98 |
+
copyreg.pickle(_Function, pickle_function)
|
99 |
+
copyreg.pickle(_uarray._BackendState, pickle_state)
|
100 |
+
copyreg.pickle(_SetBackendContext, pickle_set_backend_context)
|
101 |
+
copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context)
|
102 |
+
|
103 |
+
|
104 |
+
def get_state():
|
105 |
+
"""
|
106 |
+
Returns an opaque object containing the current state of all the backends.
|
107 |
+
|
108 |
+
Can be used for synchronization between threads/processes.
|
109 |
+
|
110 |
+
See Also
|
111 |
+
--------
|
112 |
+
set_state
|
113 |
+
Sets the state returned by this function.
|
114 |
+
"""
|
115 |
+
return _uarray.get_state()
|
116 |
+
|
117 |
+
|
118 |
+
@contextlib.contextmanager
|
119 |
+
def reset_state():
|
120 |
+
"""
|
121 |
+
Returns a context manager that resets all state once exited.
|
122 |
+
|
123 |
+
See Also
|
124 |
+
--------
|
125 |
+
set_state
|
126 |
+
Context manager that sets the backend state.
|
127 |
+
get_state
|
128 |
+
Gets a state to be set by this context manager.
|
129 |
+
"""
|
130 |
+
with set_state(get_state()):
|
131 |
+
yield
|
132 |
+
|
133 |
+
|
134 |
+
@contextlib.contextmanager
|
135 |
+
def set_state(state):
|
136 |
+
"""
|
137 |
+
A context manager that sets the state of the backends to one returned by :obj:`get_state`.
|
138 |
+
|
139 |
+
See Also
|
140 |
+
--------
|
141 |
+
get_state
|
142 |
+
Gets a state to be set by this context manager.
|
143 |
+
""" # noqa: E501
|
144 |
+
old_state = get_state()
|
145 |
+
_uarray.set_state(state)
|
146 |
+
try:
|
147 |
+
yield
|
148 |
+
finally:
|
149 |
+
_uarray.set_state(old_state, True)
|
150 |
+
|
151 |
+
|
152 |
+
def create_multimethod(*args, **kwargs):
|
153 |
+
"""
|
154 |
+
Creates a decorator for generating multimethods.
|
155 |
+
|
156 |
+
This function creates a decorator that can be used with an argument
|
157 |
+
extractor in order to generate a multimethod. Other than for the
|
158 |
+
argument extractor, all arguments are passed on to
|
159 |
+
:obj:`generate_multimethod`.
|
160 |
+
|
161 |
+
See Also
|
162 |
+
--------
|
163 |
+
generate_multimethod
|
164 |
+
Generates a multimethod.
|
165 |
+
"""
|
166 |
+
|
167 |
+
def wrapper(a):
|
168 |
+
return generate_multimethod(a, *args, **kwargs)
|
169 |
+
|
170 |
+
return wrapper
|
171 |
+
|
172 |
+
|
173 |
+
def generate_multimethod(
|
174 |
+
argument_extractor: ArgumentExtractorType,
|
175 |
+
argument_replacer: ArgumentReplacerType,
|
176 |
+
domain: str,
|
177 |
+
default: typing.Optional[typing.Callable] = None,
|
178 |
+
):
|
179 |
+
"""
|
180 |
+
Generates a multimethod.
|
181 |
+
|
182 |
+
Parameters
|
183 |
+
----------
|
184 |
+
argument_extractor : ArgumentExtractorType
|
185 |
+
A callable which extracts the dispatchable arguments. Extracted arguments
|
186 |
+
should be marked by the :obj:`Dispatchable` class. It has the same signature
|
187 |
+
as the desired multimethod.
|
188 |
+
argument_replacer : ArgumentReplacerType
|
189 |
+
A callable with the signature (args, kwargs, dispatchables), which should also
|
190 |
+
return an (args, kwargs) pair with the dispatchables replaced inside the
|
191 |
+
args/kwargs.
|
192 |
+
domain : str
|
193 |
+
A string value indicating the domain of this multimethod.
|
194 |
+
default: Optional[Callable], optional
|
195 |
+
The default implementation of this multimethod, where ``None`` (the default)
|
196 |
+
specifies there is no default implementation.
|
197 |
+
|
198 |
+
Examples
|
199 |
+
--------
|
200 |
+
In this example, ``a`` is to be dispatched over, so we return it, while marking it
|
201 |
+
as an ``int``.
|
202 |
+
The trailing comma is needed because the args have to be returned as an iterable.
|
203 |
+
|
204 |
+
>>> def override_me(a, b):
|
205 |
+
... return Dispatchable(a, int),
|
206 |
+
|
207 |
+
Next, we define the argument replacer that replaces the dispatchables inside
|
208 |
+
args/kwargs with the supplied ones.
|
209 |
+
|
210 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
211 |
+
... return (dispatchables[0], args[1]), {}
|
212 |
+
|
213 |
+
Next, we define the multimethod.
|
214 |
+
|
215 |
+
>>> overridden_me = generate_multimethod(
|
216 |
+
... override_me, override_replacer, "ua_examples"
|
217 |
+
... )
|
218 |
+
|
219 |
+
Notice that there's no default implementation, unless you supply one.
|
220 |
+
|
221 |
+
>>> overridden_me(1, "a")
|
222 |
+
Traceback (most recent call last):
|
223 |
+
...
|
224 |
+
uarray.BackendNotImplementedError: ...
|
225 |
+
|
226 |
+
>>> overridden_me2 = generate_multimethod(
|
227 |
+
... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y)
|
228 |
+
... )
|
229 |
+
>>> overridden_me2(1, "a")
|
230 |
+
(1, 'a')
|
231 |
+
|
232 |
+
See Also
|
233 |
+
--------
|
234 |
+
uarray
|
235 |
+
See the module documentation for how to override the method by creating
|
236 |
+
backends.
|
237 |
+
"""
|
238 |
+
kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
|
239 |
+
ua_func = _Function(
|
240 |
+
argument_extractor,
|
241 |
+
argument_replacer,
|
242 |
+
domain,
|
243 |
+
arg_defaults,
|
244 |
+
kw_defaults,
|
245 |
+
default,
|
246 |
+
)
|
247 |
+
|
248 |
+
return functools.update_wrapper(ua_func, argument_extractor)
|
249 |
+
|
250 |
+
|
251 |
+
def set_backend(backend, coerce=False, only=False):
|
252 |
+
"""
|
253 |
+
A context manager that sets the preferred backend.
|
254 |
+
|
255 |
+
Parameters
|
256 |
+
----------
|
257 |
+
backend
|
258 |
+
The backend to set.
|
259 |
+
coerce
|
260 |
+
Whether or not to coerce to a specific backend's types. Implies ``only``.
|
261 |
+
only
|
262 |
+
Whether or not this should be the last backend to try.
|
263 |
+
|
264 |
+
See Also
|
265 |
+
--------
|
266 |
+
skip_backend: A context manager that allows skipping of backends.
|
267 |
+
set_global_backend: Set a single, global backend for a domain.
|
268 |
+
"""
|
269 |
+
try:
|
270 |
+
return backend.__ua_cache__["set", coerce, only]
|
271 |
+
except AttributeError:
|
272 |
+
backend.__ua_cache__ = {}
|
273 |
+
except KeyError:
|
274 |
+
pass
|
275 |
+
|
276 |
+
ctx = _SetBackendContext(backend, coerce, only)
|
277 |
+
backend.__ua_cache__["set", coerce, only] = ctx
|
278 |
+
return ctx
|
279 |
+
|
280 |
+
|
281 |
+
def skip_backend(backend):
|
282 |
+
"""
|
283 |
+
A context manager that allows one to skip a given backend from processing
|
284 |
+
entirely. This allows one to use another backend's code in a library that
|
285 |
+
is also a consumer of the same backend.
|
286 |
+
|
287 |
+
Parameters
|
288 |
+
----------
|
289 |
+
backend
|
290 |
+
The backend to skip.
|
291 |
+
|
292 |
+
See Also
|
293 |
+
--------
|
294 |
+
set_backend: A context manager that allows setting of backends.
|
295 |
+
set_global_backend: Set a single, global backend for a domain.
|
296 |
+
"""
|
297 |
+
try:
|
298 |
+
return backend.__ua_cache__["skip"]
|
299 |
+
except AttributeError:
|
300 |
+
backend.__ua_cache__ = {}
|
301 |
+
except KeyError:
|
302 |
+
pass
|
303 |
+
|
304 |
+
ctx = _SkipBackendContext(backend)
|
305 |
+
backend.__ua_cache__["skip"] = ctx
|
306 |
+
return ctx
|
307 |
+
|
308 |
+
|
309 |
+
def get_defaults(f):
|
310 |
+
sig = inspect.signature(f)
|
311 |
+
kw_defaults = {}
|
312 |
+
arg_defaults = []
|
313 |
+
opts = set()
|
314 |
+
for k, v in sig.parameters.items():
|
315 |
+
if v.default is not inspect.Parameter.empty:
|
316 |
+
kw_defaults[k] = v.default
|
317 |
+
if v.kind in (
|
318 |
+
inspect.Parameter.POSITIONAL_ONLY,
|
319 |
+
inspect.Parameter.POSITIONAL_OR_KEYWORD,
|
320 |
+
):
|
321 |
+
arg_defaults.append(v.default)
|
322 |
+
opts.add(k)
|
323 |
+
|
324 |
+
return kw_defaults, tuple(arg_defaults), opts
|
325 |
+
|
326 |
+
|
327 |
+
def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
|
328 |
+
"""
|
329 |
+
This utility method replaces the default backend for permanent use. It
|
330 |
+
will be tried in the list of backends automatically, unless the
|
331 |
+
``only`` flag is set on a backend. This will be the first tried
|
332 |
+
backend outside the :obj:`set_backend` context manager.
|
333 |
+
|
334 |
+
Note that this method is not thread-safe.
|
335 |
+
|
336 |
+
.. warning::
|
337 |
+
We caution library authors against using this function in
|
338 |
+
their code. We do *not* support this use-case. This function
|
339 |
+
is meant to be used only by users themselves, or by a reference
|
340 |
+
implementation, if one exists.
|
341 |
+
|
342 |
+
Parameters
|
343 |
+
----------
|
344 |
+
backend
|
345 |
+
The backend to register.
|
346 |
+
coerce : bool
|
347 |
+
Whether to coerce input types when trying this backend.
|
348 |
+
only : bool
|
349 |
+
If ``True``, no more backends will be tried if this fails.
|
350 |
+
Implied by ``coerce=True``.
|
351 |
+
try_last : bool
|
352 |
+
If ``True``, the global backend is tried after registered backends.
|
353 |
+
|
354 |
+
See Also
|
355 |
+
--------
|
356 |
+
set_backend: A context manager that allows setting of backends.
|
357 |
+
skip_backend: A context manager that allows skipping of backends.
|
358 |
+
"""
|
359 |
+
_uarray.set_global_backend(backend, coerce, only, try_last)
|
360 |
+
|
361 |
+
|
362 |
+
def register_backend(backend):
|
363 |
+
"""
|
364 |
+
This utility method sets registers backend for permanent use. It
|
365 |
+
will be tried in the list of backends automatically, unless the
|
366 |
+
``only`` flag is set on a backend.
|
367 |
+
|
368 |
+
Note that this method is not thread-safe.
|
369 |
+
|
370 |
+
Parameters
|
371 |
+
----------
|
372 |
+
backend
|
373 |
+
The backend to register.
|
374 |
+
"""
|
375 |
+
_uarray.register_backend(backend)
|
376 |
+
|
377 |
+
|
378 |
+
def clear_backends(domain, registered=True, globals=False):
    """
    This utility method clears registered backends.

    .. warning::
        We caution library authors against using this function in
        their code. We do *not* support this use-case. This function
        is meant to be used only by users themselves.

    .. warning::
        Do NOT use this method inside a multimethod call, or the
        program is likely to crash.

    Parameters
    ----------
    domain : Optional[str]
        The domain for which to de-register backends. ``None`` means
        de-register for all domains.
    registered : bool
        Whether or not to clear registered backends. See :obj:`register_backend`.
    globals : bool
        Whether or not to clear global backends. See :obj:`set_global_backend`.

    See Also
    --------
    register_backend : Register a backend globally.
    set_global_backend : Set a global backend.
    """
    # NOTE: the ``globals`` parameter name shadows the builtin, but it is
    # public API and cannot be renamed without breaking callers.
    # Delegate to the C extension, which owns the backend registries.
    _uarray.clear_backends(domain, registered, globals)
|
407 |
+
|
408 |
+
|
409 |
+
class Dispatchable:
    """
    A utility class which marks an argument with a specific dispatch type.


    Attributes
    ----------
    value
        The value of the Dispatchable.

    type
        The type of the Dispatchable.

    Examples
    --------
    >>> x = Dispatchable(1, str)
    >>> x
    <Dispatchable: type=<class 'str'>, value=1>

    See Also
    --------
    all_of_type
        Marks all unmarked parameters of a function.

    mark_as
        Allows one to create a utility function to mark as a given type.
    """

    def __init__(self, value, dispatch_type, coercible=True):
        # Keep the wrapped value together with its dispatch marking and
        # whether backends may coerce it to their own types.
        self.value = value
        self.type = dispatch_type
        self.coercible = coercible

    def __getitem__(self, index):
        # Behave like the 2-tuple ``(type, value)`` for indexing/unpacking.
        as_pair = (self.type, self.value)
        return as_pair[index]

    def __str__(self):
        return "<{}: type={!r}, value={!r}>".format(
            type(self).__name__, self.type, self.value
        )

    __repr__ = __str__
|
449 |
+
|
450 |
+
|
451 |
+
def mark_as(dispatch_type):
    """
    Creates a utility function to mark something as a specific type.

    Examples
    --------
    >>> mark_int = mark_as(int)
    >>> mark_int(1)
    <Dispatchable: type=<class 'int'>, value=1>
    """
    # Partially apply Dispatchable so only the value (and optionally
    # ``coercible``) remains to be supplied by the caller.
    return functools.partial(Dispatchable, dispatch_type=dispatch_type)
|
462 |
+
|
463 |
+
|
464 |
+
def all_of_type(arg_type):
    """
    Marks all unmarked arguments as a given type.

    Examples
    --------
    >>> @all_of_type(str)
    ... def f(a, b):
    ...     return a, Dispatchable(b, int)
    >>> f('a', 1)
    (<Dispatchable: type=<class 'str'>, value='a'>,
    <Dispatchable: type=<class 'int'>, value=1>)
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Wrap every returned argument that is not already marked.
            marked = []
            for item in func(*args, **kwargs):
                if isinstance(item, Dispatchable):
                    marked.append(item)
                else:
                    marked.append(Dispatchable(item, arg_type))
            return tuple(marked)

        return wrapper

    return decorator
|
492 |
+
|
493 |
+
|
494 |
+
def wrap_single_convertor(convert_single):
    """
    Wraps a ``__ua_convert__`` defined for a single element to all elements.
    If any of them return ``NotImplemented``, the operation is assumed to be
    undefined.

    Accepts a signature of (value, type, coerce).
    """

    @functools.wraps(convert_single)
    def __ua_convert__(dispatchables, coerce):
        results = []
        for dispatchable in dispatchables:
            # Coercion is only attempted when both the caller requests it
            # and the individual dispatchable allows it.
            outcome = convert_single(
                dispatchable.value,
                dispatchable.type,
                coerce and dispatchable.coercible,
            )
            if outcome is NotImplemented:
                return NotImplemented
            results.append(outcome)
        return results

    return __ua_convert__
|
517 |
+
|
518 |
+
|
519 |
+
def wrap_single_convertor_instance(convert_single):
    """
    Wraps a ``__ua_convert__`` defined for a single element to all elements.
    If any of them return ``NotImplemented``, the operation is assumed to be
    undefined.

    Accepts a signature of (value, type, coerce).
    """

    @functools.wraps(convert_single)
    def __ua_convert__(self, dispatchables, coerce):
        results = []
        for dispatchable in dispatchables:
            # Same per-element protocol as wrap_single_convertor, but the
            # converter is a bound-style method receiving ``self`` first.
            outcome = convert_single(
                self,
                dispatchable.value,
                dispatchable.type,
                coerce and dispatchable.coercible,
            )
            if outcome is NotImplemented:
                return NotImplemented
            results.append(outcome)
        return results

    return __ua_convert__
|
542 |
+
|
543 |
+
|
544 |
+
def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):
    """Set the backend to the first active backend that supports ``value``

    This is useful for functions that call multimethods without any dispatchable
    arguments. You can use :func:`determine_backend` to ensure the same backend
    is used everywhere in a block of multimethod calls.

    Parameters
    ----------
    value
        The value being tested
    dispatch_type
        The dispatch type associated with ``value``, aka
        ":ref:`marking <MarkingGlossary>`".
    domain: string
        The domain to query for backends and set.
    coerce: bool
        Whether or not to allow coercion to the backend's types. Implies ``only``.
    only: bool
        Whether or not this should be the last backend to try.

    See Also
    --------
    set_backend: For when you know which backend to set

    Notes
    -----

    Support is determined by the ``__ua_convert__`` protocol. Backends not
    supporting the type must return ``NotImplemented`` from their
    ``__ua_convert__`` if they don't support input of that type.

    Examples
    --------

    Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting
    different types, ``TypeA`` and ``TypeB``. Neither supporting the other type:

    >>> with ua.set_backend(ex.BackendA):
    ...     ex.call_multimethod(ex.TypeB(), ex.TypeB())
    Traceback (most recent call last):
        ...
    uarray.BackendNotImplementedError: ...

    Now consider a multimethod that creates a new object of ``TypeA``, or
    ``TypeB`` depending on the active backend.

    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
    ...     res = ex.creation_multimethod()
    ...     ex.call_multimethod(res, ex.TypeA())
    Traceback (most recent call last):
        ...
    uarray.BackendNotImplementedError: ...

    ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the
    innermost with statement. So, ``call_multimethod`` fails since the types
    don't match.

    Instead, we need to first find a backend suitable for all of our objects.

    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
    ...     x = ex.TypeA()
    ...     with ua.determine_backend(x, "mark", domain="ua_examples"):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, x)
    TypeA

    """
    # Mark the value so the C extension can run the __ua_convert__ protocol
    # against each active backend in this domain.
    dispatchables = (Dispatchable(value, dispatch_type, coerce),)
    backend = _uarray.determine_backend(domain, dispatchables, coerce)

    # set_backend returns a context manager; entering it activates ``backend``.
    return set_backend(backend, coerce=coerce, only=only)
|
616 |
+
|
617 |
+
|
618 |
+
def determine_backend_multi(
    dispatchables, *, domain, only=True, coerce=False, **kwargs
):
    """Set a backend supporting all ``dispatchables``

    This is useful for functions that call multimethods without any dispatchable
    arguments. You can use :func:`determine_backend_multi` to ensure the same
    backend is used everywhere in a block of multimethod calls involving
    multiple arrays.

    Parameters
    ----------
    dispatchables: Sequence[Union[uarray.Dispatchable, Any]]
        The dispatchables that must be supported
    domain: string
        The domain to query for backends and set.
    coerce: bool
        Whether or not to allow coercion to the backend's types. Implies ``only``.
    only: bool
        Whether or not this should be the last backend to try.
    dispatch_type: Optional[Any]
        The default dispatch type associated with ``dispatchables``, aka
        ":ref:`marking <MarkingGlossary>`".

    See Also
    --------
    determine_backend: For a single dispatch value
    set_backend: For when you know which backend to set

    Notes
    -----

    Support is determined by the ``__ua_convert__`` protocol. Backends not
    supporting the type must return ``NotImplemented`` from their
    ``__ua_convert__`` if they don't support input of that type.

    Examples
    --------

    :func:`determine_backend` allows the backend to be set from a single
    object. :func:`determine_backend_multi` allows multiple objects to be
    checked simultaneously for support in the backend. Suppose we have a
    ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call,
    and a ``BackendBC`` that doesn't support ``TypeA``.

    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
    ...     a, b = ex.TypeA(), ex.TypeB()
    ...     with ua.determine_backend_multi(
    ...         [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")],
    ...         domain="ua_examples"
    ...     ):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, a, b)
    TypeA

    This won't call ``BackendBC`` because it doesn't support ``TypeA``.

    We can also use leave out the ``ua.Dispatchable`` if we specify the
    default ``dispatch_type`` for the ``dispatchables`` argument.

    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
    ...     a, b = ex.TypeA(), ex.TypeB()
    ...     with ua.determine_backend_multi(
    ...         [a, b], dispatch_type="mark", domain="ua_examples"
    ...     ):
    ...         res = ex.creation_multimethod()
    ...         ex.call_multimethod(res, a, b)
    TypeA

    """
    # ``dispatch_type`` is taken from **kwargs (rather than a named default)
    # so that "not supplied" can be distinguished from any explicit value.
    try:
        default_type = kwargs.pop("dispatch_type")
    except KeyError:
        # No default marking: every element must already be a Dispatchable.
        dispatchables = tuple(dispatchables)
        if not all(isinstance(d, Dispatchable) for d in dispatchables):
            raise TypeError("dispatchables must be instances of uarray.Dispatchable")
    else:
        # Wrap any unmarked element with the default dispatch type.
        dispatchables = tuple(
            d if isinstance(d, Dispatchable) else Dispatchable(d, default_type)
            for d in dispatchables
        )

    if kwargs:
        raise TypeError(f"Received unexpected keyword arguments: {kwargs}")

    backend = _uarray.determine_backend(domain, dispatchables, coerce)

    # set_backend returns a context manager; entering it activates ``backend``.
    return set_backend(backend, coerce=coerce, only=only)
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
NumPy Array API compatibility library
|
3 |
+
|
4 |
+
This is a small wrapper around NumPy and CuPy that is compatible with the
|
5 |
+
Array API standard https://data-apis.org/array-api/latest/. See also NEP 47
|
6 |
+
https://numpy.org/neps/nep-0047-array-api-standard.html.
|
7 |
+
|
8 |
+
Unlike array_api_strict, this is not a strict minimal implementation of the
|
9 |
+
Array API, but rather just an extension of the main NumPy namespace with
|
10 |
+
changes needed to be compliant with the Array API. See
|
11 |
+
https://numpy.org/doc/stable/reference/array_api.html for a full list of
|
12 |
+
changes. In particular, unlike array_api_strict, this package does not use a
|
13 |
+
separate Array object, but rather just uses numpy.ndarray directly.
|
14 |
+
|
15 |
+
Library authors using the Array API may wish to test against array_api_strict
|
16 |
+
to ensure they are not using functionality outside of the standard, but prefer
|
17 |
+
this implementation for the default when working with NumPy arrays.
|
18 |
+
|
19 |
+
"""
|
20 |
+
__version__ = '1.5.1'
|
21 |
+
|
22 |
+
from .common import * # noqa: F401, F403
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.19 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc
ADDED
Binary file (1.55 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Internal helpers
|
3 |
+
"""
|
4 |
+
|
5 |
+
from functools import wraps
|
6 |
+
from inspect import signature
|
7 |
+
|
8 |
+
def get_xp(xp):
    """
    Decorator to automatically replace xp with the corresponding array module.

    Use like

    import numpy as np

    @get_xp(np)
    def func(x, /, xp, kwarg=None):
        return xp.func(x, kwarg=kwarg)

    Note that xp must be a keyword argument and come after all non-keyword
    arguments.

    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Inject the captured array module as the ``xp`` keyword.
            return func(*args, xp=xp, **kwargs)

        # Hide ``xp`` from the public signature so introspection (help(),
        # IDEs) shows only the caller-facing parameters.
        original_sig = signature(func)
        visible_params = [
            param for name, param in original_sig.parameters.items() if name != "xp"
        ]

        if wrapper.__doc__ is None:
            wrapper.__doc__ = f"""\
Array API compatibility wrapper for {func.__name__}.

See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.

"""
        wrapper.__signature__ = original_sig.replace(parameters=visible_params)
        return wrapper

    return decorator
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
from ._helpers import * # noqa: F403
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (257 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (12.3 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-310.pyc
ADDED
Binary file (3.32 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc
ADDED
Binary file (12.6 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc
ADDED
Binary file (5.98 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (983 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py
ADDED
@@ -0,0 +1,554 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
These are functions that are just aliases of existing functions in NumPy.
|
3 |
+
"""
|
4 |
+
|
5 |
+
from __future__ import annotations
|
6 |
+
|
7 |
+
from typing import TYPE_CHECKING
|
8 |
+
if TYPE_CHECKING:
|
9 |
+
import numpy as np
|
10 |
+
from typing import Optional, Sequence, Tuple, Union
|
11 |
+
from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol
|
12 |
+
|
13 |
+
from typing import NamedTuple
|
14 |
+
from types import ModuleType
|
15 |
+
import inspect
|
16 |
+
|
17 |
+
from ._helpers import _check_device, is_numpy_array, array_namespace
|
18 |
+
|
19 |
+
# These functions are modified from the NumPy versions.
|
20 |
+
|
21 |
+
def arange(
    start: Union[int, float],
    /,
    stop: Optional[Union[int, float]] = None,
    step: Union[int, float] = 1,
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs
) -> ndarray:
    # The array API adds a ``device`` keyword to creation functions;
    # _check_device rejects devices the wrapped namespace cannot honour,
    # then the call is forwarded to the namespace's own implementation.
    _check_device(xp, device)
    return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs)

def empty(
    shape: Union[int, Tuple[int, ...]],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs
) -> ndarray:
    # Validate ``device`` then delegate (same pattern as arange()).
    _check_device(xp, device)
    return xp.empty(shape, dtype=dtype, **kwargs)

def empty_like(
    x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
    **kwargs
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.empty_like(x, dtype=dtype, **kwargs)

def eye(
    n_rows: int,
    n_cols: Optional[int] = None,
    /,
    *,
    xp,
    k: int = 0,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # The array API spells NumPy's ``M`` as ``n_cols``; translate here.
    _check_device(xp, device)
    return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs)

def full(
    shape: Union[int, Tuple[int, ...]],
    fill_value: Union[int, float],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.full(shape, fill_value, dtype=dtype, **kwargs)

def full_like(
    x: ndarray,
    /,
    fill_value: Union[int, float],
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.full_like(x, fill_value, dtype=dtype, **kwargs)

def linspace(
    start: Union[int, float],
    stop: Union[int, float],
    /,
    num: int,
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    endpoint: bool = True,
    **kwargs,
) -> ndarray:
    # Note: ``num`` is positional-or-keyword and required in the array API.
    _check_device(xp, device)
    return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs)

def ones(
    shape: Union[int, Tuple[int, ...]],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.ones(shape, dtype=dtype, **kwargs)

def ones_like(
    x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.ones_like(x, dtype=dtype, **kwargs)

def zeros(
    shape: Union[int, Tuple[int, ...]],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.zeros(shape, dtype=dtype, **kwargs)

def zeros_like(
    x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    # Validate ``device`` then delegate.
    _check_device(xp, device)
    return xp.zeros_like(x, dtype=dtype, **kwargs)
|
142 |
+
|
143 |
+
# np.unique() is split into four functions in the array API:
|
144 |
+
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
|
145 |
+
# to remove polymorphic return types).
|
146 |
+
|
147 |
+
# The functions here return namedtuples (np.unique() returns a normal
|
148 |
+
# tuple).
|
149 |
+
|
150 |
+
# Note that these named tuples aren't actually part of the standard namespace,
|
151 |
+
# but I don't see any issue with exporting the names here regardless.
|
152 |
+
class UniqueAllResult(NamedTuple):
    # Result of unique_all(): unique values, their first-occurrence indices,
    # the inverse mapping (reshaped to the input's shape) and per-value counts.
    values: ndarray
    indices: ndarray
    inverse_indices: ndarray
    counts: ndarray


class UniqueCountsResult(NamedTuple):
    # Result of unique_counts(): unique values and how often each occurs.
    values: ndarray
    counts: ndarray


class UniqueInverseResult(NamedTuple):
    # Result of unique_inverse(): unique values and the inverse mapping.
    values: ndarray
    inverse_indices: ndarray
|
167 |
+
|
168 |
+
|
169 |
+
def _unique_kwargs(xp):
|
170 |
+
# Older versions of NumPy and CuPy do not have equal_nan. Rather than
|
171 |
+
# trying to parse version numbers, just check if equal_nan is in the
|
172 |
+
# signature.
|
173 |
+
s = inspect.signature(xp.unique)
|
174 |
+
if 'equal_nan' in s.parameters:
|
175 |
+
return {'equal_nan': False}
|
176 |
+
return {}
|
177 |
+
|
178 |
+
def unique_all(x: ndarray, /, xp) -> UniqueAllResult:
    # One of four array-API replacements for np.unique()'s polymorphic
    # return value: this variant returns values, indices, inverse and counts.
    kwargs = _unique_kwargs(xp)
    values, indices, inverse_indices, counts = xp.unique(
        x,
        return_counts=True,
        return_index=True,
        return_inverse=True,
        **kwargs,
    )
    # np.unique() flattens inverse indices, but they need to share x's shape
    # See https://github.com/numpy/numpy/issues/20638
    inverse_indices = inverse_indices.reshape(x.shape)
    return UniqueAllResult(
        values,
        indices,
        inverse_indices,
        counts,
    )


def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult:
    # Values plus occurrence counts only; no index/inverse bookkeeping.
    kwargs = _unique_kwargs(xp)
    res = xp.unique(
        x,
        return_counts=True,
        return_index=False,
        return_inverse=False,
        **kwargs
    )

    return UniqueCountsResult(*res)


def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult:
    # Values plus the mapping from each input element to its unique value.
    kwargs = _unique_kwargs(xp)
    values, inverse_indices = xp.unique(
        x,
        return_counts=False,
        return_index=False,
        return_inverse=True,
        **kwargs,
    )
    # xp.unique() flattens inverse indices, but they need to share x's shape
    # See https://github.com/numpy/numpy/issues/20638
    inverse_indices = inverse_indices.reshape(x.shape)
    return UniqueInverseResult(values, inverse_indices)


def unique_values(x: ndarray, /, xp) -> ndarray:
    # Simplest variant: just the sorted unique values, as a plain array.
    kwargs = _unique_kwargs(xp)
    return xp.unique(
        x,
        return_counts=False,
        return_index=False,
        return_inverse=False,
        **kwargs,
    )
|
235 |
+
|
236 |
+
def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray:
    # Fast path: when no copy is requested and the dtype already matches,
    # hand back the input itself (ndarray.astype would otherwise copy).
    if dtype == x.dtype and not copy:
        return x
    return x.astype(dtype=dtype, copy=copy)
|
240 |
+
|
241 |
+
# These functions have different keyword argument names
|
242 |
+
|
243 |
+
def std(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,  # correction instead of ddof
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    # The array API spells NumPy's ``ddof`` as ``correction``; translate
    # the keyword and delegate to the wrapped namespace.
    return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)

def var(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,  # correction instead of ddof
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    # Same ``correction`` -> ``ddof`` translation as std().
    return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
|
266 |
+
|
267 |
+
# Unlike transpose(), the axes argument to permute_dims() is required.
|
268 |
+
def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray:
    # Array-API name for transpose(); unlike xp.transpose, ``axes`` is required.
    return xp.transpose(x, axes)
|
270 |
+
|
271 |
+
# Creation functions add the device keyword (which does nothing for NumPy)
|
272 |
+
|
273 |
+
# asarray also adds the copy keyword
|
274 |
+
def _asarray(
    obj: Union[
        ndarray,
        bool,
        int,
        float,
        NestedSequence[bool | int | float],
        SupportsBufferProtocol,
    ],
    /,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    copy: "Optional[Union[bool, np._CopyMode]]" = None,
    namespace = None,
    **kwargs,
) -> ndarray:
    """
    Array API compatibility wrapper for asarray().

    See the corresponding documentation in NumPy/CuPy and/or the array API
    specification for more details.

    """
    # Resolve the target namespace: infer it from ``obj`` when not given,
    # otherwise accept a module object or one of the known namespace names.
    if namespace is None:
        try:
            xp = array_namespace(obj, _use_compat=False)
        except ValueError:
            # TODO: What about lists of arrays?
            raise ValueError("A namespace must be specified for asarray() with non-array input")
    elif isinstance(namespace, ModuleType):
        xp = namespace
    elif namespace == 'numpy':
        import numpy as xp
    elif namespace == 'cupy':
        import cupy as xp
    elif namespace == 'dask.array':
        import dask.array as xp
    else:
        raise ValueError("Unrecognized namespace argument to asarray()")

    _check_device(xp, device)
    # Normalise the ``copy`` flag: newer NumPy also spells copy=False as
    # np._CopyMode.IF_NEEDED and copy=True as np._CopyMode.ALWAYS.
    if is_numpy_array(obj):
        import numpy as np
        if hasattr(np, '_CopyMode'):
            # Not present in older NumPys
            COPY_FALSE = (False, np._CopyMode.IF_NEEDED)
            COPY_TRUE = (True, np._CopyMode.ALWAYS)
        else:
            COPY_FALSE = (False,)
            COPY_TRUE = (True,)
    else:
        COPY_FALSE = (False,)
        COPY_TRUE = (True,)
    # NOTE(review): copy=False is rejected for every namespace except
    # dask.array — presumably because xp.asarray cannot guarantee a no-copy
    # path; confirm against upstream array-api-compat before relying on it.
    if copy in COPY_FALSE and namespace != "dask.array":
        # copy=False is not yet implemented in xp.asarray
        raise NotImplementedError("copy=False is not yet implemented")
    if (hasattr(xp, "ndarray") and isinstance(obj, xp.ndarray)):
        # Already an array of the target namespace: copy only when forced,
        # or when a dtype change makes a copy unavoidable.
        if dtype is not None and obj.dtype != dtype:
            copy = True
        if copy in COPY_TRUE:
            return xp.array(obj, copy=True, dtype=dtype)
        return obj
    elif namespace == "dask.array":
        if copy in COPY_TRUE:
            if dtype is None:
                return obj.copy()
            # Go through numpy, since dask copy is no-op by default
            import numpy as np
            obj = np.array(obj, dtype=dtype, copy=True)
            return xp.array(obj, dtype=dtype)
        else:
            import dask.array as da
            import numpy as np
            if not isinstance(obj, da.Array):
                obj = np.asarray(obj, dtype=dtype)
                return da.from_array(obj)
            return obj

    return xp.asarray(obj, dtype=dtype, **kwargs)
|
354 |
+
|
355 |
+
# np.reshape calls the keyword argument 'newshape' instead of 'shape'
|
356 |
+
def reshape(x: ndarray,
            /,
            shape: Tuple[int, ...],
            xp, copy: Optional[bool] = None,
            **kwargs) -> ndarray:
    # NumPy's keyword is ``newshape``; the array API also adds ``copy``:
    #   copy=False -> must be a view (assigning .shape raises when a copy
    #                 would be required), copy=True -> always copy first,
    #   copy=None  -> the namespace's default behaviour.
    if copy is False:
        view = x.view()
        view.shape = shape
        return view
    if copy is True:
        x = x.copy()
    return xp.reshape(x, shape, **kwargs)
|
368 |
+
|
369 |
+
# The descending keyword is new in sort and argsort, and 'kind' replaced with
# 'stable'
def argsort(
    x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True,
    **kwargs,
) -> ndarray:
    """Array-API ``argsort`` supporting ``descending`` and ``stable``."""
    # numpy defaults to kind='quicksort' while cupy defaults to kind=None,
    # so only pin the kind when a stable sort was requested.
    if stable:
        kwargs['kind'] = "stable"
    if not descending:
        return xp.argsort(x, axis=axis, **kwargs)
    # NumPy has no native descending sort.  Flip the input, sort ascending,
    # flip the result, then mirror the indices; a plain flip of the
    # ascending result would not keep ties in their original order.
    flipped = xp.flip(x, axis=axis)
    idx = xp.flip(xp.argsort(flipped, axis=axis, **kwargs), axis=axis)
    # flip()/argsort() above have already validated the axis value.
    axis_ = axis if axis >= 0 else x.ndim + axis
    return (x.shape[axis_] - 1) - idx
|
395 |
+
|
396 |
+
def sort(
    x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True,
    **kwargs,
) -> ndarray:
    """Array-API ``sort`` supporting ``descending`` and ``stable``."""
    # numpy defaults to kind='quicksort' while cupy defaults to kind=None,
    # so only pin the kind when a stable sort was requested.
    if stable:
        kwargs['kind'] = "stable"
    out = xp.sort(x, axis=axis, **kwargs)
    # A descending sort is just the ascending sort reversed along axis.
    return xp.flip(out, axis=axis) if descending else out
|
409 |
+
|
410 |
+
# nonzero should error for zero-dimensional arrays
def nonzero(x: ndarray, /, xp, **kwargs) -> Tuple[ndarray, ...]:
    """Array-API ``nonzero``; rejects 0-d input as the spec requires."""
    if x.ndim == 0:
        raise ValueError("nonzero() does not support zero-dimensional arrays")
    return xp.nonzero(x, **kwargs)
|
415 |
+
|
416 |
+
# sum() and prod() should always upcast when dtype=None
def sum(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    """Array-API ``sum``: accumulates float32/complex64 in double precision."""
    # `xp.sum` already upcasts integers, but not floats or complexes.
    if dtype is None and x.dtype == xp.float32:
        dtype = xp.float64
    elif dtype is None and x.dtype == xp.complex64:
        dtype = xp.complex128
    return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs)
|
434 |
+
|
435 |
+
def prod(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    """Array-API ``prod``: accumulates float32/complex64 in double precision."""
    # Mirror sum(): integers are upcast by xp.prod itself, floats are not.
    if dtype is None and x.dtype == xp.float32:
        dtype = xp.float64
    elif dtype is None and x.dtype == xp.complex64:
        dtype = xp.complex128
    return xp.prod(x, dtype=dtype, axis=axis, keepdims=keepdims, **kwargs)
|
451 |
+
|
452 |
+
# ceil, floor, and trunc return integers for integer inputs

def ceil(x: ndarray, /, xp, **kwargs) -> ndarray:
    """Array-API ``ceil``; integer input is already its own ceiling."""
    if xp.issubdtype(x.dtype, xp.integer):
        return x
    return xp.ceil(x, **kwargs)
|
458 |
+
|
459 |
+
def floor(x: ndarray, /, xp, **kwargs) -> ndarray:
    """Array-API ``floor``; integer input is already its own floor."""
    if xp.issubdtype(x.dtype, xp.integer):
        return x
    return xp.floor(x, **kwargs)
|
463 |
+
|
464 |
+
def trunc(x: ndarray, /, xp, **kwargs) -> ndarray:
    """Array-API ``trunc``; integer input is unchanged by truncation."""
    if xp.issubdtype(x.dtype, xp.integer):
        return x
    return xp.trunc(x, **kwargs)
|
468 |
+
|
469 |
+
# linear algebra functions

def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
    """Thin pass-through to ``xp.matmul``."""
    return xp.matmul(x1, x2, **kwargs)
|
473 |
+
|
474 |
+
# Unlike transpose, matrix_transpose only transposes the last two axes.
def matrix_transpose(x: ndarray, /, xp) -> ndarray:
    """Transpose the last two axes of ``x`` (requires ``x.ndim >= 2``)."""
    if x.ndim < 2:
        raise ValueError("x must be at least 2-dimensional for matrix_transpose")
    return xp.swapaxes(x, -1, -2)
|
479 |
+
|
480 |
+
def tensordot(x1: ndarray,
              x2: ndarray,
              /,
              xp,
              *,
              axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
              **kwargs,
) -> ndarray:
    """Thin pass-through to ``xp.tensordot`` (keyword-only ``axes``)."""
    return xp.tensordot(x1, x2, axes=axes, **kwargs)
|
489 |
+
|
490 |
+
def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray:
    """Compute the (batched) vector dot product along ``axis``."""
    if x1.shape[axis] != x2.shape[axis]:
        raise ValueError("x1 and x2 must have the same size along the given axis")

    # torch names its broadcasting helper broadcast_tensors.
    broadcast = getattr(xp, 'broadcast_tensors', None)
    if broadcast is None:
        broadcast = xp.broadcast_arrays

    # Move the contraction axis last, broadcast the batch dimensions
    # together, then express the dot product as a batched (1, n) @ (n, 1)
    # matmul and strip the two singleton axes.
    a = xp.moveaxis(x1, axis, -1)
    b = xp.moveaxis(x2, axis, -1)
    a, b = broadcast(a, b)
    out = a[..., None, :] @ b[..., None]
    return out[..., 0, 0]
|
505 |
+
|
506 |
+
# isdtype is a new function in the 2022.12 array API specification.

def isdtype(
    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp,
    *, _tuple=True, # Disallow nested tuples
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    Note that outside of this function, this compat library does not yet fully
    support complex numbers.

    See
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    for more details
    """
    if isinstance(kind, tuple) and _tuple:
        # A tuple of kinds matches if any single kind does; _tuple=False on
        # the recursive call rejects tuples nested inside tuples.
        return any(isdtype(dtype, k, xp, _tuple=False) for k in kind)
    if isinstance(kind, str):
        if kind == 'bool':
            return dtype == xp.bool_
        # Map each spec kind string to the namespace's abstract scalar type.
        categories = {
            'signed integer': xp.signedinteger,
            'unsigned integer': xp.unsignedinteger,
            'integral': xp.integer,
            'real floating': xp.floating,
            'complex floating': xp.complexfloating,
            'numeric': xp.number,
        }
        abstract = categories.get(kind)
        if abstract is None:
            raise ValueError(f"Unrecognized data type kind: {kind!r}")
        return xp.issubdtype(dtype, abstract)
    # This will allow things that aren't required by the spec, like
    # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
    # more strict here to match the type annotation? Note that the
    # array_api_strict implementation will be very strict.
    return dtype == kind
|
547 |
+
|
548 |
+
# Names re-exported as the public surface of this aliases module.
__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like',
           'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like',
           'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult',
           'unique_all', 'unique_counts', 'unique_inverse', 'unique_values',
           'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort',
           'sort', 'nonzero', 'sum', 'prod', 'ceil', 'floor', 'trunc',
           'matmul', 'matrix_transpose', 'tensordot', 'vecdot', 'isdtype']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_fft.py
ADDED
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import TYPE_CHECKING, Union, Optional, Literal
|
4 |
+
|
5 |
+
if TYPE_CHECKING:
|
6 |
+
from ._typing import Device, ndarray
|
7 |
+
from collections.abc import Sequence
|
8 |
+
|
9 |
+
# Note: NumPy fft functions improperly upcast float32 and complex64 to
|
10 |
+
# complex128, which is why we require wrapping them all here.
|
11 |
+
|
12 |
+
def fft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``fft`` wrapper that keeps single-precision input single precision."""
    out = xp.fft.fft(x, n=n, axis=axis, norm=norm)
    # NumPy promotes float32/complex64 results to complex128; downcast back.
    if x.dtype in [xp.float32, xp.complex64]:
        out = out.astype(xp.complex64)
    return out
|
25 |
+
|
26 |
+
def ifft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``ifft`` wrapper that keeps single-precision input single precision."""
    out = xp.fft.ifft(x, n=n, axis=axis, norm=norm)
    # NumPy promotes float32/complex64 results to complex128; downcast back.
    if x.dtype in [xp.float32, xp.complex64]:
        out = out.astype(xp.complex64)
    return out
|
39 |
+
|
40 |
+
def fftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Sequence[int] = None,
    axes: Sequence[int] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``fftn`` wrapper that keeps single-precision input single precision."""
    out = xp.fft.fftn(x, s=s, axes=axes, norm=norm)
    # NumPy promotes float32/complex64 results to complex128; downcast back.
    if x.dtype in [xp.float32, xp.complex64]:
        out = out.astype(xp.complex64)
    return out
|
53 |
+
|
54 |
+
def ifftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Sequence[int] = None,
    axes: Sequence[int] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``ifftn`` wrapper that keeps single-precision input single precision."""
    out = xp.fft.ifftn(x, s=s, axes=axes, norm=norm)
    # NumPy promotes float32/complex64 results to complex128; downcast back.
    if x.dtype in [xp.float32, xp.complex64]:
        out = out.astype(xp.complex64)
    return out
|
67 |
+
|
68 |
+
def rfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``rfft`` wrapper: float32 input yields complex64, not complex128."""
    out = xp.fft.rfft(x, n=n, axis=axis, norm=norm)
    if x.dtype == xp.float32:
        # NumPy always emits complex128; restore single precision.
        out = out.astype(xp.complex64)
    return out
|
81 |
+
|
82 |
+
def irfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``irfft`` wrapper: complex64 input yields float32, not float64."""
    out = xp.fft.irfft(x, n=n, axis=axis, norm=norm)
    if x.dtype == xp.complex64:
        # NumPy always emits float64; restore single precision.
        out = out.astype(xp.float32)
    return out
|
95 |
+
|
96 |
+
def rfftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Sequence[int] = None,
    axes: Sequence[int] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``rfftn`` wrapper: float32 input yields complex64, not complex128."""
    out = xp.fft.rfftn(x, s=s, axes=axes, norm=norm)
    if x.dtype == xp.float32:
        # NumPy always emits complex128; restore single precision.
        out = out.astype(xp.complex64)
    return out
|
109 |
+
|
110 |
+
def irfftn(
    x: ndarray,
    /,
    xp,
    *,
    s: Sequence[int] = None,
    axes: Sequence[int] = None,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``irfftn`` wrapper: complex64 input yields float32, not float64."""
    out = xp.fft.irfftn(x, s=s, axes=axes, norm=norm)
    if x.dtype == xp.complex64:
        # NumPy always emits float64; restore single precision.
        out = out.astype(xp.float32)
    return out
|
123 |
+
|
124 |
+
def hfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``hfft`` wrapper that keeps single-precision input single precision."""
    out = xp.fft.hfft(x, n=n, axis=axis, norm=norm)
    # hfft's output is real; single-precision input maps to float32.
    if x.dtype in [xp.float32, xp.complex64]:
        out = out.astype(xp.float32)
    return out
|
137 |
+
|
138 |
+
def ihfft(
    x: ndarray,
    /,
    xp,
    *,
    n: Optional[int] = None,
    axis: int = -1,
    norm: Literal["backward", "ortho", "forward"] = "backward",
) -> ndarray:
    """``ihfft`` wrapper that keeps single-precision input single precision."""
    out = xp.fft.ihfft(x, n=n, axis=axis, norm=norm)
    # ihfft's output is complex; single-precision input maps to complex64.
    if x.dtype in [xp.float32, xp.complex64]:
        out = out.astype(xp.complex64)
    return out
|
151 |
+
|
152 |
+
def fftfreq(n: int, /, xp, *, d: float = 1.0, device: Optional[Device] = None) -> ndarray:
    """``fftfreq`` wrapper; only the (default) CPU device is supported."""
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    return xp.fft.fftfreq(n, d=d)
|
156 |
+
|
157 |
+
def rfftfreq(n: int, /, xp, *, d: float = 1.0, device: Optional[Device] = None) -> ndarray:
    """``rfftfreq`` wrapper; only the (default) CPU device is supported."""
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    return xp.fft.rfftfreq(n, d=d)
|
161 |
+
|
162 |
+
def fftshift(x: ndarray, /, xp, *, axes: Union[int, Sequence[int]] = None) -> ndarray:
    """Keyword-only pass-through to ``xp.fft.fftshift``."""
    return xp.fft.fftshift(x, axes=axes)
|
164 |
+
|
165 |
+
def ifftshift(x: ndarray, /, xp, *, axes: Union[int, Sequence[int]] = None) -> ndarray:
    """Keyword-only pass-through to ``xp.fft.ifftshift``."""
    return xp.fft.ifftshift(x, axes=axes)
|
167 |
+
|
168 |
+
# Public FFT wrappers exported by this module.
__all__ = [
    "fft",
    "ifft",
    "fftn",
    "ifftn",
    "rfft",
    "irfft",
    "rfftn",
    "irfftn",
    "hfft",
    "ihfft",
    "fftfreq",
    "rfftfreq",
    "fftshift",
    "ifftshift",
]
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py
ADDED
@@ -0,0 +1,515 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Various helper functions which are not part of the spec.
|
3 |
+
|
4 |
+
Functions which start with an underscore are for internal use only but helpers
|
5 |
+
that are in __all__ are intended as additional helper functions for use by end
|
6 |
+
users of the compat library.
|
7 |
+
"""
|
8 |
+
from __future__ import annotations
|
9 |
+
|
10 |
+
from typing import TYPE_CHECKING
|
11 |
+
|
12 |
+
if TYPE_CHECKING:
|
13 |
+
from typing import Optional, Union, Any
|
14 |
+
from ._typing import Array, Device
|
15 |
+
|
16 |
+
import sys
|
17 |
+
import math
|
18 |
+
import inspect
|
19 |
+
import warnings
|
20 |
+
|
21 |
+
def is_numpy_array(x):
    """
    Return True if `x` is a NumPy array.

    This function does not import NumPy if it has not already been imported
    and is therefore cheap to use.

    This also returns True for `ndarray` subclasses and NumPy scalar objects.

    See Also
    --------

    array_namespace
    is_array_api_obj
    is_cupy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    # Only touch numpy when it is already loaded; importing it here would
    # defeat the "cheap to call" guarantee above.
    if 'numpy' in sys.modules:
        import numpy as np
        # TODO: Should we reject ndarray subclasses?
        return isinstance(x, (np.ndarray, np.generic))
    # numpy was never imported, so x cannot possibly be a numpy array.
    return False
|
48 |
+
|
49 |
+
def is_cupy_array(x):
    """
    Return True if `x` is a CuPy array.

    This function does not import CuPy if it has not already been imported
    and is therefore cheap to use.

    This also returns True for `cupy.ndarray` subclasses and CuPy scalar objects.

    See Also
    --------

    array_namespace
    is_array_api_obj
    is_numpy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    # Only touch cupy when it is already loaded; importing it here would
    # defeat the "cheap to call" guarantee above.
    if 'cupy' in sys.modules:
        import cupy as cp
        # TODO: Should we reject ndarray subclasses?
        return isinstance(x, (cp.ndarray, cp.generic))
    # cupy was never imported, so x cannot possibly be a cupy array.
    return False
|
76 |
+
|
77 |
+
def is_torch_array(x):
    """
    Return True if `x` is a PyTorch tensor.

    This function does not import PyTorch if it has not already been imported
    and is therefore cheap to use.

    See Also
    --------

    array_namespace
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_dask_array
    is_jax_array
    """
    # Only touch torch when it is already loaded; importing it here would
    # defeat the "cheap to call" guarantee above.
    if 'torch' in sys.modules:
        import torch
        # TODO: Should we reject ndarray subclasses?
        return isinstance(x, torch.Tensor)
    # torch was never imported, so x cannot possibly be a torch tensor.
    return False
|
102 |
+
|
103 |
+
def is_dask_array(x):
    """
    Return True if `x` is a dask.array Array.

    This function does not import dask if it has not already been imported
    and is therefore cheap to use.

    See Also
    --------

    array_namespace
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_jax_array
    """
    # Only touch dask when it is already loaded; importing it here would
    # defeat the "cheap to call" guarantee above.
    if 'dask.array' in sys.modules:
        import dask.array
        return isinstance(x, dask.array.Array)
    # dask.array was never imported, so x cannot possibly be a dask array.
    return False
|
127 |
+
|
128 |
+
def is_jax_array(x):
    """
    Return True if `x` is a JAX array.

    This function does not import JAX if it has not already been imported
    and is therefore cheap to use.

    See Also
    --------

    array_namespace
    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_dask_array
    """
    # Only touch jax when it is already loaded; importing it here would
    # defeat the "cheap to call" guarantee above.
    if 'jax' in sys.modules:
        import jax
        return isinstance(x, jax.Array)
    # jax was never imported, so x cannot possibly be a jax array.
    return False
|
153 |
+
|
154 |
+
def is_array_api_obj(x):
    """
    Return True if `x` is an array API compatible array object.

    See Also
    --------

    array_namespace
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_dask_array
    is_jax_array
    """
    # Any of the known array types counts, as does any object advertising
    # the standard's __array_namespace__ protocol.
    return (
        is_numpy_array(x)
        or is_cupy_array(x)
        or is_torch_array(x)
        or is_dask_array(x)
        or is_jax_array(x)
        or hasattr(x, '__array_namespace__')
    )
|
174 |
+
|
175 |
+
def _check_api_version(api_version):
    """Validate a requested spec version; only 2022.12 is actually provided."""
    # None and the supported version pass silently.
    if api_version is None or api_version == '2022.12':
        return
    if api_version == '2021.12':
        # Accepted for backwards compatibility, but the namespace returned
        # is still the newer one, so warn rather than fail.
        warnings.warn("The 2021.12 version of the array API specification was requested but the returned namespace is actually version 2022.12")
        return
    raise ValueError("Only the 2022.12 version of the array API specification is currently supported")
|
180 |
+
|
181 |
+
def array_namespace(*xs, api_version=None, _use_compat=True):
    """
    Get the array API compatible namespace for the arrays `xs`.

    Parameters
    ----------
    xs: arrays
        one or more arrays.

    api_version: str
        The newest version of the spec that you need support for (currently
        the compat library wrapped APIs support v2022.12).

    Returns
    -------

    out: namespace
        The array API compatible namespace corresponding to the arrays in `xs`.

    Raises
    ------
    TypeError
        If `xs` contains arrays from different array libraries or contains a
        non-array.


    Typical usage is to pass the arguments of a function to
    `array_namespace()` at the top of a function to get the corresponding
    array API namespace:

    .. code:: python

        def your_function(x, y):
            xp = array_api_compat.array_namespace(x, y)
            # Now use xp as the array library namespace
            return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)


    Wrapped array namespaces can also be imported directly. For example,
    `array_namespace(np.array(...))` will return `array_api_compat.numpy`.
    This function will also work for any array library not wrapped by
    array-api-compat if it explicitly defines `__array_namespace__
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html>`__
    (the wrapped namespace is always preferred if it exists).

    See Also
    --------

    is_array_api_obj
    is_numpy_array
    is_cupy_array
    is_torch_array
    is_dask_array
    is_jax_array

    """
    # One namespace is collected per input; more than one distinct namespace
    # (i.e. mixed array libraries) is rejected below.
    namespaces = set()
    for x in xs:
        if is_numpy_array(x):
            _check_api_version(api_version)
            if _use_compat:
                # Prefer the wrapped compat namespace over raw numpy.
                from .. import numpy as numpy_namespace
                namespaces.add(numpy_namespace)
            else:
                import numpy as np
                namespaces.add(np)
        elif is_cupy_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import cupy as cupy_namespace
                namespaces.add(cupy_namespace)
            else:
                import cupy as cp
                namespaces.add(cp)
        elif is_torch_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import torch as torch_namespace
                namespaces.add(torch_namespace)
            else:
                import torch
                namespaces.add(torch)
        elif is_dask_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from ..dask import array as dask_namespace
                namespaces.add(dask_namespace)
            else:
                # There is no unwrapped array-API namespace for dask.
                raise TypeError("_use_compat cannot be False if input array is a dask array!")
        elif is_jax_array(x):
            _check_api_version(api_version)
            # jax.experimental.array_api is already an array namespace. We do
            # not have a wrapper submodule for it.
            import jax.experimental.array_api as jnp
            namespaces.add(jnp)
        elif hasattr(x, '__array_namespace__'):
            # Unknown library implementing the standard's protocol directly.
            namespaces.add(x.__array_namespace__(api_version=api_version))
        else:
            # TODO: Support Python scalars?
            raise TypeError(f"{type(x).__name__} is not a supported array type")

    if not namespaces:
        raise TypeError("Unrecognized array input")

    if len(namespaces) != 1:
        raise TypeError(f"Multiple namespaces for array inputs: {namespaces}")

    xp, = namespaces

    return xp

# backwards compatibility alias
get_namespace = array_namespace
|
294 |
+
|
295 |
+
def _check_device(xp, device):
    # Validate a requested device for CPU-only namespaces.  Only NumPy is
    # checked here; other namespaces perform their own validation.
    if xp == sys.modules.get('numpy'):
        if device not in ["cpu", None]:
            raise ValueError(f"Unsupported device for NumPy: {device!r}")
|
299 |
+
|
300 |
+
# Placeholder object to represent the dask device
# when the array backend is not the CPU.
# (since it is not easy to tell which device a dask array is on)
class _dask_device:
    # Marker class; only the repr matters, and a single shared instance
    # (_DASK_DEVICE below) is ever created.
    def __repr__(self):
        return "DASK_DEVICE"

_DASK_DEVICE = _dask_device()
|
308 |
+
|
309 |
+
# device() is not on numpy.ndarray or dask.array and to_device() is not on numpy.ndarray
# or cupy.ndarray. They are not included in array objects of this library
# because this library just reuses the respective ndarray classes without
# wrapping or subclassing them. These helper functions can be used instead of
# the wrapper functions for libraries that need to support both NumPy/CuPy and
# other libraries that use devices.
def device(x: Array, /) -> Device:
    """
    Hardware device the array data resides on.

    This is equivalent to `x.device` according to the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
    This helper is included because some array libraries either do not have
    the `device` attribute or include it with an incompatible API.

    Parameters
    ----------
    x: array
        array instance from an array API compatible library.

    Returns
    -------
    out: device
        a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
        section of the array API specification).

    Notes
    -----

    For NumPy the device is always `"cpu"`. For Dask, the device is always a
    special `DASK_DEVICE` object.

    See Also
    --------

    to_device : Move array data to a different device.

    """
    if is_numpy_array(x):
        return "cpu"
    elif is_dask_array(x):
        # Peek at the metadata of the dask array to determine the backing type
        try:
            import numpy as np
            if isinstance(x._meta, np.ndarray):
                # Must be on CPU since backed by numpy
                return "cpu"
        except ImportError:
            pass
        # Non-numpy-backed (or undeterminable): report the placeholder.
        return _DASK_DEVICE
    elif is_jax_array(x):
        # JAX has .device() as a method, but it is being deprecated so that it
        # can become a property, in accordance with the standard. In order for
        # this function to not break when JAX makes the flip, we check for
        # both here.
        if inspect.ismethod(x.device):
            return x.device()
        else:
            return x.device
    # All remaining libraries expose .device as the standard requires.
    return x.device
|
369 |
+
|
370 |
+
# Based on cupy.array_api.Array.to_device
def _cupy_to_device(x, device, /, stream=None):
    # Move a cupy array to another CUDA device (or to the host when
    # device == "cpu"), optionally performing the copy on a given stream.
    import cupy as cp
    from cupy.cuda import Device as _Device
    from cupy.cuda import stream as stream_module
    from cupy_backends.cuda.api import runtime

    if device == x.device:
        return x
    elif device == "cpu":
        # allowing us to use `to_device(x, "cpu")`
        # is useful for portable test swapping between
        # host and device backends
        return x.get()
    elif not isinstance(device, _Device):
        raise ValueError(f"Unsupported device {device!r}")
    else:
        # see cupy/cupy#5985 for the reason how we handle device/stream here
        prev_device = runtime.getDevice()
        prev_stream: stream_module.Stream = None
        if stream is not None:
            # Remember the current stream so it can be restored afterwards.
            prev_stream = stream_module.get_current_stream()
            # stream can be an int as specified in __dlpack__, or a CuPy stream
            if isinstance(stream, int):
                stream = cp.cuda.ExternalStream(stream)
            elif isinstance(stream, cp.cuda.Stream):
                pass
            else:
                raise ValueError('the input stream is not recognized')
            stream.use()
        try:
            # Switch to the target device just for the copy.
            runtime.setDevice(device.id)
            arr = x.copy()
        finally:
            # Always restore the previous device and stream, even on error.
            runtime.setDevice(prev_device)
            if stream is not None:
                prev_stream.use()
        return arr
|
408 |
+
|
409 |
+
def _torch_to_device(x, device, /, stream=None):
    # torch.Tensor.to has no stream parameter, so only the default (None)
    # stream is accepted here.
    if stream is not None:
        raise NotImplementedError
    return x.to(device)
|
413 |
+
|
414 |
+
def to_device(x: Array, device: Device, /, *, stream: Optional[Union[int, Any]] = None) -> Array:
    """
    Copy the array from the device on which it currently resides to the specified ``device``.

    This is equivalent to `x.to_device(device, stream=stream)` according to
    the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
    This helper is included because some array libraries do not have the
    `to_device` method.

    Parameters
    ----------

    x: array
        array instance from an array API compatible library.

    device: device
        a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
        section of the array API specification).

    stream: Optional[Union[int, Any]]
        stream object to use during copy. In addition to the types supported
        in ``array.__dlpack__``, implementations may choose to support any
        library-specific stream object with the caveat that any code using
        such an object would not be portable.

    Returns
    -------

    out: array
        an array with the same data and data type as ``x`` and located on the
        specified ``device``.

    Notes
    -----

    For NumPy, this function effectively does nothing since the only supported
    device is the CPU. For CuPy, this method supports CuPy CUDA
    :external+cupy:class:`Device <cupy.cuda.Device>` and
    :external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
    this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
    (the ``stream`` argument is not supported in PyTorch).

    See Also
    --------

    device : Hardware device the array data resides on.

    """
    if is_numpy_array(x):
        # NumPy is CPU-only: moving to "cpu" is a no-op, everything else fails.
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        if device == 'cpu':
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif is_cupy_array(x):
        # cupy does not yet have to_device
        return _cupy_to_device(x, device, stream=stream)
    elif is_torch_array(x):
        return _torch_to_device(x, device, stream=stream)
    elif is_dask_array(x):
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        # TODO: What if our array is on the GPU already?
        if device == 'cpu':
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif is_jax_array(x):
        # This import adds to_device to x
        import jax.experimental.array_api # noqa: F401
        return x.to_device(device, stream=stream)
    # Fall back to the standard's to_device method on the array itself.
    return x.to_device(device, stream=stream)
|
486 |
+
|
487 |
+
def size(x):
    """
    Return the total number of elements of x.

    Equivalent to ``x.size`` as specified by the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html>`__.
    Provided as a helper because PyTorch defines ``Tensor.size`` with
    incompatible semantics.

    Returns ``None`` when the shape contains an unknown (``None``) dimension.
    """
    shape = x.shape
    if None in shape:
        return None
    return math.prod(shape)
|
500 |
+
|
501 |
+
__all__ = [
|
502 |
+
"array_namespace",
|
503 |
+
"device",
|
504 |
+
"get_namespace",
|
505 |
+
"is_array_api_obj",
|
506 |
+
"is_cupy_array",
|
507 |
+
"is_dask_array",
|
508 |
+
"is_jax_array",
|
509 |
+
"is_numpy_array",
|
510 |
+
"is_torch_array",
|
511 |
+
"size",
|
512 |
+
"to_device",
|
513 |
+
]
|
514 |
+
|
515 |
+
_all_ignore = ['sys', 'math', 'inspect', 'warnings']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py
ADDED
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import TYPE_CHECKING, NamedTuple
|
4 |
+
if TYPE_CHECKING:
|
5 |
+
from typing import Literal, Optional, Tuple, Union
|
6 |
+
from ._typing import ndarray
|
7 |
+
|
8 |
+
import math
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
if np.__version__[0] == "2":
|
12 |
+
from numpy.lib.array_utils import normalize_axis_tuple
|
13 |
+
else:
|
14 |
+
from numpy.core.numeric import normalize_axis_tuple
|
15 |
+
|
16 |
+
from ._aliases import matmul, matrix_transpose, tensordot, vecdot, isdtype
|
17 |
+
from .._internal import get_xp
|
18 |
+
|
19 |
+
# These are in the main NumPy namespace but not in numpy.linalg
|
20 |
+
def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray:
    """Cross product of ``x1`` and ``x2`` along ``axis``.

    Present here because the array API places ``cross`` in ``linalg`` while
    NumPy keeps it in the main namespace; simply forwards to ``xp.cross``.
    """
    result = xp.cross(x1, x2, axis=axis, **kwargs)
    return result
|
22 |
+
|
23 |
+
def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
    """Outer product of two vectors.

    Present here because the array API places ``outer`` in ``linalg`` while
    NumPy keeps it in the main namespace; simply forwards to ``xp.outer``.
    """
    product = xp.outer(x1, x2, **kwargs)
    return product
|
25 |
+
|
26 |
+
class EighResult(NamedTuple):
    # Named result for ``eigh``: the array API standard requires eigh to
    # return a namedtuple with ``eigenvalues`` and ``eigenvectors`` fields.
    eigenvalues: ndarray
    eigenvectors: ndarray
|
29 |
+
|
30 |
+
class QRResult(NamedTuple):
    # Named result for ``qr``: the array API standard requires qr to return
    # a namedtuple with ``Q`` and ``R`` fields.
    Q: ndarray
    R: ndarray
|
33 |
+
|
34 |
+
class SlogdetResult(NamedTuple):
    # Named result for ``slogdet``: the array API standard requires slogdet
    # to return a namedtuple with ``sign`` and ``logabsdet`` fields.
    sign: ndarray
    logabsdet: ndarray
|
37 |
+
|
38 |
+
class SVDResult(NamedTuple):
    # Named result for ``svd``: the array API standard requires svd to
    # return a namedtuple with ``U``, ``S`` and ``Vh`` fields.
    U: ndarray
    S: ndarray
    Vh: ndarray
|
42 |
+
|
43 |
+
# These functions are the same as their NumPy counterparts except they return
|
44 |
+
# a namedtuple.
|
45 |
+
def eigh(x: ndarray, /, xp, **kwargs) -> EighResult:
    """Eigendecomposition of a Hermitian matrix as an ``EighResult``.

    Same as ``xp.linalg.eigh`` except the ``(eigenvalues, eigenvectors)``
    pair is wrapped in a namedtuple, as the array API standard requires.
    """
    values, vectors = xp.linalg.eigh(x, **kwargs)
    return EighResult(values, vectors)
|
47 |
+
|
48 |
+
def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced',
       **kwargs) -> QRResult:
    """QR factorization as a ``QRResult``.

    Same as ``xp.linalg.qr`` except the ``(Q, R)`` pair is wrapped in a
    namedtuple, as the array API standard requires.
    """
    q, r = xp.linalg.qr(x, mode=mode, **kwargs)
    return QRResult(q, r)
|
51 |
+
|
52 |
+
def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult:
    """Sign and log absolute determinant, as a ``SlogdetResult``.

    Same as ``xp.linalg.slogdet`` except the ``(sign, logabsdet)`` pair is
    wrapped in a namedtuple, as the array API standard requires.
    """
    sign, logabsdet = xp.linalg.slogdet(x, **kwargs)
    return SlogdetResult(sign, logabsdet)
|
54 |
+
|
55 |
+
def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult:
    """Singular value decomposition as an ``SVDResult``.

    Same as ``xp.linalg.svd`` except the ``(U, S, Vh)`` triple is wrapped in
    a namedtuple, as the array API standard requires.
    """
    u, s, vh = xp.linalg.svd(x, full_matrices=full_matrices, **kwargs)
    return SVDResult(u, s, vh)
|
57 |
+
|
58 |
+
# These functions have additional keyword arguments
|
59 |
+
|
60 |
+
# The upper keyword argument is new from NumPy
|
61 |
+
def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray:
    """Cholesky decomposition with the array API's ``upper`` keyword.

    ``xp.linalg.cholesky`` always produces the lower-triangular factor; when
    ``upper=True`` its (conjugate) transpose is returned instead, which is
    the upper-triangular factor required by the standard.
    """
    lower = xp.linalg.cholesky(x, **kwargs)
    if not upper:
        return lower
    upper_factor = get_xp(xp)(matrix_transpose)(lower)
    # For complex input the upper factor is the conjugate transpose.
    if get_xp(xp)(isdtype)(upper_factor.dtype, 'complex floating'):
        upper_factor = xp.conj(upper_factor)
    return upper_factor
|
69 |
+
|
70 |
+
# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
|
71 |
+
# Note that it has a different semantic meaning from tol and rcond.
|
72 |
+
def matrix_rank(x: ndarray,
                /,
                xp,
                *,
                rtol: Optional[Union[float, ndarray]] = None,
                **kwargs) -> ndarray:
    """Count singular values of ``x`` above a relative tolerance.

    Differs from ``xp.linalg.matrix_rank`` in two ways: 1-D input is
    rejected, and a user-supplied ``rtol`` is scaled by the largest singular
    value (a *relative* tolerance, unlike NumPy's ``tol``/``rcond``).
    """
    if x.ndim < 2:
        raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
    singular_values = get_xp(xp)(svdvals)(x, **kwargs)
    largest = singular_values.max(axis=-1, keepdims=True)
    if rtol is None:
        # Default tolerance mirrors numpy.linalg.matrix_rank's default.
        threshold = largest * max(x.shape[-2:]) * xp.finfo(singular_values.dtype).eps
    else:
        # Relative tolerance: scaled by the largest singular value, unlike
        # xp.linalg.matrix_rank's tol.
        threshold = largest * xp.asarray(rtol)[..., xp.newaxis]
    return xp.count_nonzero(singular_values > threshold, axis=-1)
|
90 |
+
|
91 |
+
def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray:
    """Moore-Penrose pseudo-inverse with array-API ``rtol`` semantics.

    The default tolerance includes the ``max(M, N)`` factor (NumPy's
    ``rcond`` default does not); the call is then forwarded to
    ``xp.linalg.pinv`` via its ``rcond`` argument.
    """
    effective_rtol = rtol
    if effective_rtol is None:
        effective_rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps
    return xp.linalg.pinv(x, rcond=effective_rtol, **kwargs)
|
97 |
+
|
98 |
+
# These functions are new in the array API spec
|
99 |
+
|
100 |
+
def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray:
    """Matrix norm over the last two axes (Frobenius norm by default)."""
    trailing_axes = (-2, -1)
    return xp.linalg.norm(x, axis=trailing_axes, keepdims=keepdims, ord=ord)
|
102 |
+
|
103 |
+
# svdvals is not in NumPy (but it is in SciPy). It is equivalent to
|
104 |
+
# xp.linalg.svd(compute_uv=False).
|
105 |
+
def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]:
    """Singular values of ``x``.

    Not in NumPy's ``linalg`` namespace (SciPy has it); equivalent to
    ``xp.linalg.svd(x, compute_uv=False)``.
    """
    values = xp.linalg.svd(x, compute_uv=False)
    return values
|
107 |
+
|
108 |
+
def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray:
    """Compute a vector norm over ``axis`` (order ``ord``, default 2).

    Forces ``xp.linalg.norm`` into vector-norm mode even when it would
    otherwise compute a matrix norm, and extends it to accept a tuple of
    axes and a ``keepdims`` flag.
    """
    # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or
    # when axis=None and the input is 2-D, so to force a vector norm, we make
    # it so the input is 1-D (for axis=None), or reshape so that norm is done
    # on a single dimension.
    if axis is None:
        # Note: xp.linalg.norm() doesn't handle 0-D arrays
        _x = x.ravel()
        _axis = 0
    elif isinstance(axis, tuple):
        # Note: The axis argument supports any number of axes, whereas
        # xp.linalg.norm() only supports a single axis for vector norm.
        normalized_axis = normalize_axis_tuple(axis, x.ndim)
        rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
        # Move the requested axes to the front, then collapse them into a
        # single leading dimension so the norm runs over exactly one axis.
        newshape = axis + rest
        _x = xp.transpose(x, newshape).reshape(
            (math.prod([x.shape[i] for i in axis]), *[x.shape[i] for i in rest]))
        _axis = 0
    else:
        # Single integer axis: xp.linalg.norm handles this directly.
        _x = x
        _axis = axis

    res = xp.linalg.norm(_x, axis=_axis, ord=ord)

    if keepdims:
        # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks
        # above to avoid matrix norm logic.
        shape = list(x.shape)
        _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim)
        for i in _axis:
            shape[i] = 1
        res = xp.reshape(res, tuple(shape))

    return res
|
142 |
+
|
143 |
+
# xp.diagonal and xp.trace operate on the first two axes whereas these
|
144 |
+
# operates on the last two
|
145 |
+
|
146 |
+
def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray:
    """Extract diagonals from the last two axes of ``x``.

    ``xp.diagonal`` defaults to the *first* two axes; the array API operates
    on the trailing pair, so ``axis1``/``axis2`` are pinned to ``-2``/``-1``.
    """
    trailing = {'axis1': -2, 'axis2': -1}
    return xp.diagonal(x, offset=offset, **trailing, **kwargs)
|
148 |
+
|
149 |
+
def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray:
    """Sum along diagonals of the last two axes of ``x``.

    When ``dtype`` is unspecified, single-precision inputs accumulate in
    double precision (float32 -> float64, complex64 -> complex128), and the
    result is always returned as an array via ``xp.asarray``.
    """
    accum_dtype = dtype
    if accum_dtype is None:
        if x.dtype == xp.float32:
            accum_dtype = xp.float64
        elif x.dtype == xp.complex64:
            accum_dtype = xp.complex128
    summed = xp.trace(x, offset=offset, dtype=accum_dtype, axis1=-2, axis2=-1, **kwargs)
    return xp.asarray(summed)
|
156 |
+
|
157 |
+
__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult',
|
158 |
+
'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet',
|
159 |
+
'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm',
|
160 |
+
'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal',
|
161 |
+
'trace']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
__all__ = [
|
4 |
+
"NestedSequence",
|
5 |
+
"SupportsBufferProtocol",
|
6 |
+
]
|
7 |
+
|
8 |
+
from typing import (
|
9 |
+
Any,
|
10 |
+
TypeVar,
|
11 |
+
Protocol,
|
12 |
+
)
|
13 |
+
|
14 |
+
_T_co = TypeVar("_T_co", covariant=True)
|
15 |
+
|
16 |
+
class NestedSequence(Protocol[_T_co]):
    """Structural type for a sequence whose items are either scalars or
    further nested sequences (e.g. a list of lists of numbers)."""
    def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
    def __len__(self, /) -> int: ...
|
19 |
+
|
20 |
+
SupportsBufferProtocol = Any
|
21 |
+
|
22 |
+
Array = Any
|
23 |
+
Device = Any
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from cupy import * # noqa: F403
|
2 |
+
|
3 |
+
# from cupy import * doesn't overwrite these builtin names
|
4 |
+
from cupy import abs, max, min, round # noqa: F401
|
5 |
+
|
6 |
+
# These imports may overwrite names from the import * above.
|
7 |
+
from ._aliases import * # noqa: F403
|
8 |
+
|
9 |
+
# See the comment in the numpy __init__.py
|
10 |
+
__import__(__package__ + '.linalg')
|
11 |
+
|
12 |
+
__import__(__package__ + '.fft')
|
13 |
+
|
14 |
+
from ..common._helpers import * # noqa: F401,F403
|
15 |
+
|
16 |
+
__array_api_version__ = '2022.12'
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (484 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (1.99 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (768 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-310.pyc
ADDED
Binary file (819 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (1.08 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from functools import partial
|
4 |
+
|
5 |
+
import cupy as cp
|
6 |
+
|
7 |
+
from ..common import _aliases
|
8 |
+
from .._internal import get_xp
|
9 |
+
|
10 |
+
asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy')
|
11 |
+
asarray.__doc__ = _aliases._asarray.__doc__
|
12 |
+
del partial
|
13 |
+
|
14 |
+
bool = cp.bool_
|
15 |
+
|
16 |
+
# Basic renames
|
17 |
+
acos = cp.arccos
|
18 |
+
acosh = cp.arccosh
|
19 |
+
asin = cp.arcsin
|
20 |
+
asinh = cp.arcsinh
|
21 |
+
atan = cp.arctan
|
22 |
+
atan2 = cp.arctan2
|
23 |
+
atanh = cp.arctanh
|
24 |
+
bitwise_left_shift = cp.left_shift
|
25 |
+
bitwise_invert = cp.invert
|
26 |
+
bitwise_right_shift = cp.right_shift
|
27 |
+
concat = cp.concatenate
|
28 |
+
pow = cp.power
|
29 |
+
|
30 |
+
arange = get_xp(cp)(_aliases.arange)
|
31 |
+
empty = get_xp(cp)(_aliases.empty)
|
32 |
+
empty_like = get_xp(cp)(_aliases.empty_like)
|
33 |
+
eye = get_xp(cp)(_aliases.eye)
|
34 |
+
full = get_xp(cp)(_aliases.full)
|
35 |
+
full_like = get_xp(cp)(_aliases.full_like)
|
36 |
+
linspace = get_xp(cp)(_aliases.linspace)
|
37 |
+
ones = get_xp(cp)(_aliases.ones)
|
38 |
+
ones_like = get_xp(cp)(_aliases.ones_like)
|
39 |
+
zeros = get_xp(cp)(_aliases.zeros)
|
40 |
+
zeros_like = get_xp(cp)(_aliases.zeros_like)
|
41 |
+
UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
|
42 |
+
UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
|
43 |
+
UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
|
44 |
+
unique_all = get_xp(cp)(_aliases.unique_all)
|
45 |
+
unique_counts = get_xp(cp)(_aliases.unique_counts)
|
46 |
+
unique_inverse = get_xp(cp)(_aliases.unique_inverse)
|
47 |
+
unique_values = get_xp(cp)(_aliases.unique_values)
|
48 |
+
astype = _aliases.astype
|
49 |
+
std = get_xp(cp)(_aliases.std)
|
50 |
+
var = get_xp(cp)(_aliases.var)
|
51 |
+
permute_dims = get_xp(cp)(_aliases.permute_dims)
|
52 |
+
reshape = get_xp(cp)(_aliases.reshape)
|
53 |
+
argsort = get_xp(cp)(_aliases.argsort)
|
54 |
+
sort = get_xp(cp)(_aliases.sort)
|
55 |
+
nonzero = get_xp(cp)(_aliases.nonzero)
|
56 |
+
sum = get_xp(cp)(_aliases.sum)
|
57 |
+
prod = get_xp(cp)(_aliases.prod)
|
58 |
+
ceil = get_xp(cp)(_aliases.ceil)
|
59 |
+
floor = get_xp(cp)(_aliases.floor)
|
60 |
+
trunc = get_xp(cp)(_aliases.trunc)
|
61 |
+
matmul = get_xp(cp)(_aliases.matmul)
|
62 |
+
matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
|
63 |
+
tensordot = get_xp(cp)(_aliases.tensordot)
|
64 |
+
|
65 |
+
# These functions are completely new here. If the library already has them
|
66 |
+
# (i.e., numpy 2.0), use the library version instead of our wrapper.
|
67 |
+
if hasattr(cp, 'vecdot'):
|
68 |
+
vecdot = cp.vecdot
|
69 |
+
else:
|
70 |
+
vecdot = get_xp(cp)(_aliases.vecdot)
|
71 |
+
if hasattr(cp, 'isdtype'):
|
72 |
+
isdtype = cp.isdtype
|
73 |
+
else:
|
74 |
+
isdtype = get_xp(cp)(_aliases.isdtype)
|
75 |
+
|
76 |
+
__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos',
|
77 |
+
'acosh', 'asin', 'asinh', 'atan', 'atan2',
|
78 |
+
'atanh', 'bitwise_left_shift', 'bitwise_invert',
|
79 |
+
'bitwise_right_shift', 'concat', 'pow']
|
80 |
+
|
81 |
+
_all_ignore = ['cp', 'get_xp']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
__all__ = [
|
4 |
+
"ndarray",
|
5 |
+
"Device",
|
6 |
+
"Dtype",
|
7 |
+
]
|
8 |
+
|
9 |
+
import sys
|
10 |
+
from typing import (
|
11 |
+
Union,
|
12 |
+
TYPE_CHECKING,
|
13 |
+
)
|
14 |
+
|
15 |
+
from cupy import (
|
16 |
+
ndarray,
|
17 |
+
dtype,
|
18 |
+
int8,
|
19 |
+
int16,
|
20 |
+
int32,
|
21 |
+
int64,
|
22 |
+
uint8,
|
23 |
+
uint16,
|
24 |
+
uint32,
|
25 |
+
uint64,
|
26 |
+
float32,
|
27 |
+
float64,
|
28 |
+
)
|
29 |
+
|
30 |
+
from cupy.cuda.device import Device
|
31 |
+
|
32 |
+
if TYPE_CHECKING or sys.version_info >= (3, 9):
|
33 |
+
Dtype = dtype[Union[
|
34 |
+
int8,
|
35 |
+
int16,
|
36 |
+
int32,
|
37 |
+
int64,
|
38 |
+
uint8,
|
39 |
+
uint16,
|
40 |
+
uint32,
|
41 |
+
uint64,
|
42 |
+
float32,
|
43 |
+
float64,
|
44 |
+
]]
|
45 |
+
else:
|
46 |
+
Dtype = dtype
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/fft.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from cupy.fft import * # noqa: F403
|
2 |
+
# cupy.fft doesn't have __all__. If it is added, replace this with
|
3 |
+
#
|
4 |
+
# from cupy.fft import __all__ as linalg_all
|
5 |
+
_n = {}
|
6 |
+
exec('from cupy.fft import *', _n)
|
7 |
+
del _n['__builtins__']
|
8 |
+
fft_all = list(_n)
|
9 |
+
del _n
|
10 |
+
|
11 |
+
from ..common import _fft
|
12 |
+
from .._internal import get_xp
|
13 |
+
|
14 |
+
import cupy as cp
|
15 |
+
|
16 |
+
fft = get_xp(cp)(_fft.fft)
|
17 |
+
ifft = get_xp(cp)(_fft.ifft)
|
18 |
+
fftn = get_xp(cp)(_fft.fftn)
|
19 |
+
ifftn = get_xp(cp)(_fft.ifftn)
|
20 |
+
rfft = get_xp(cp)(_fft.rfft)
|
21 |
+
irfft = get_xp(cp)(_fft.irfft)
|
22 |
+
rfftn = get_xp(cp)(_fft.rfftn)
|
23 |
+
irfftn = get_xp(cp)(_fft.irfftn)
|
24 |
+
hfft = get_xp(cp)(_fft.hfft)
|
25 |
+
ihfft = get_xp(cp)(_fft.ihfft)
|
26 |
+
fftfreq = get_xp(cp)(_fft.fftfreq)
|
27 |
+
rfftfreq = get_xp(cp)(_fft.rfftfreq)
|
28 |
+
fftshift = get_xp(cp)(_fft.fftshift)
|
29 |
+
ifftshift = get_xp(cp)(_fft.ifftshift)
|
30 |
+
|
31 |
+
__all__ = fft_all + _fft.__all__
|
32 |
+
|
33 |
+
del get_xp
|
34 |
+
del cp
|
35 |
+
del fft_all
|
36 |
+
del _fft
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from cupy.linalg import * # noqa: F403
|
2 |
+
# cupy.linalg doesn't have __all__. If it is added, replace this with
|
3 |
+
#
|
4 |
+
# from cupy.linalg import __all__ as linalg_all
|
5 |
+
_n = {}
|
6 |
+
exec('from cupy.linalg import *', _n)
|
7 |
+
del _n['__builtins__']
|
8 |
+
linalg_all = list(_n)
|
9 |
+
del _n
|
10 |
+
|
11 |
+
from ..common import _linalg
|
12 |
+
from .._internal import get_xp
|
13 |
+
|
14 |
+
import cupy as cp
|
15 |
+
|
16 |
+
# These functions are in both the main and linalg namespaces
|
17 |
+
from ._aliases import matmul, matrix_transpose, tensordot, vecdot # noqa: F401
|
18 |
+
|
19 |
+
cross = get_xp(cp)(_linalg.cross)
|
20 |
+
outer = get_xp(cp)(_linalg.outer)
|
21 |
+
EighResult = _linalg.EighResult
|
22 |
+
QRResult = _linalg.QRResult
|
23 |
+
SlogdetResult = _linalg.SlogdetResult
|
24 |
+
SVDResult = _linalg.SVDResult
|
25 |
+
eigh = get_xp(cp)(_linalg.eigh)
|
26 |
+
qr = get_xp(cp)(_linalg.qr)
|
27 |
+
slogdet = get_xp(cp)(_linalg.slogdet)
|
28 |
+
svd = get_xp(cp)(_linalg.svd)
|
29 |
+
cholesky = get_xp(cp)(_linalg.cholesky)
|
30 |
+
matrix_rank = get_xp(cp)(_linalg.matrix_rank)
|
31 |
+
pinv = get_xp(cp)(_linalg.pinv)
|
32 |
+
matrix_norm = get_xp(cp)(_linalg.matrix_norm)
|
33 |
+
svdvals = get_xp(cp)(_linalg.svdvals)
|
34 |
+
diagonal = get_xp(cp)(_linalg.diagonal)
|
35 |
+
trace = get_xp(cp)(_linalg.trace)
|
36 |
+
|
37 |
+
# These functions are completely new here. If the library already has them
|
38 |
+
# (i.e., numpy 2.0), use the library version instead of our wrapper.
|
39 |
+
if hasattr(cp.linalg, 'vector_norm'):
|
40 |
+
vector_norm = cp.linalg.vector_norm
|
41 |
+
else:
|
42 |
+
vector_norm = get_xp(cp)(_linalg.vector_norm)
|
43 |
+
|
44 |
+
__all__ = linalg_all + _linalg.__all__
|
45 |
+
|
46 |
+
del get_xp
|
47 |
+
del cp
|
48 |
+
del linalg_all
|
49 |
+
del _linalg
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__init__.py
ADDED
File without changes
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (230 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__init__.py
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dask.array import * # noqa: F403
|
2 |
+
|
3 |
+
# These imports may overwrite names from the import * above.
|
4 |
+
from ._aliases import * # noqa: F403
|
5 |
+
|
6 |
+
__array_api_version__ = '2022.12'
|
7 |
+
|
8 |
+
__import__(__package__ + '.linalg')
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (374 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (3.36 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (2.02 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/_aliases.py
ADDED
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from ...common import _aliases
|
4 |
+
from ...common._helpers import _check_device
|
5 |
+
|
6 |
+
from ..._internal import get_xp
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
from numpy import (
|
10 |
+
# Constants
|
11 |
+
e,
|
12 |
+
inf,
|
13 |
+
nan,
|
14 |
+
pi,
|
15 |
+
newaxis,
|
16 |
+
# Dtypes
|
17 |
+
bool_ as bool,
|
18 |
+
float32,
|
19 |
+
float64,
|
20 |
+
int8,
|
21 |
+
int16,
|
22 |
+
int32,
|
23 |
+
int64,
|
24 |
+
uint8,
|
25 |
+
uint16,
|
26 |
+
uint32,
|
27 |
+
uint64,
|
28 |
+
complex64,
|
29 |
+
complex128,
|
30 |
+
iinfo,
|
31 |
+
finfo,
|
32 |
+
can_cast,
|
33 |
+
result_type,
|
34 |
+
)
|
35 |
+
|
36 |
+
from typing import TYPE_CHECKING
|
37 |
+
if TYPE_CHECKING:
|
38 |
+
from typing import Optional, Union
|
39 |
+
|
40 |
+
from ...common._typing import Device, Dtype, Array
|
41 |
+
|
42 |
+
import dask.array as da
|
43 |
+
|
44 |
+
isdtype = get_xp(np)(_aliases.isdtype)
|
45 |
+
astype = _aliases.astype
|
46 |
+
|
47 |
+
# Common aliases
|
48 |
+
|
49 |
+
# This arange func is modified from the common one to
|
50 |
+
# not pass stop/step as keyword arguments, which will cause
|
51 |
+
# an error with dask
|
52 |
+
|
53 |
+
# TODO: delete the xp stuff, it shouldn't be necessary
|
54 |
+
def _dask_arange(
    start: Union[int, float],
    /,
    stop: Optional[Union[int, float]] = None,
    step: Union[int, float] = 1,
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> Array:
    """``arange`` wrapper that passes start/stop/step positionally.

    Dask's ``arange`` rejects ``stop``/``step`` as keyword arguments, so the
    common-aliases implementation cannot be reused directly.
    """
    _check_device(xp, device)
    if stop is None:
        # Single-argument form: the lone positional value is really the
        # stop, so the default start of 0 is supplied explicitly.
        positional = [0, start, step]
    else:
        positional = [start, stop, step]
    return xp.arange(*positional, dtype=dtype, **kwargs)
|
75 |
+
|
76 |
+
arange = get_xp(da)(_dask_arange)
|
77 |
+
eye = get_xp(da)(_aliases.eye)
|
78 |
+
|
79 |
+
from functools import partial
|
80 |
+
asarray = partial(_aliases._asarray, namespace='dask.array')
|
81 |
+
asarray.__doc__ = _aliases._asarray.__doc__
|
82 |
+
|
83 |
+
linspace = get_xp(da)(_aliases.linspace)
|
84 |
+
eye = get_xp(da)(_aliases.eye)
|
85 |
+
UniqueAllResult = get_xp(da)(_aliases.UniqueAllResult)
|
86 |
+
UniqueCountsResult = get_xp(da)(_aliases.UniqueCountsResult)
|
87 |
+
UniqueInverseResult = get_xp(da)(_aliases.UniqueInverseResult)
|
88 |
+
unique_all = get_xp(da)(_aliases.unique_all)
|
89 |
+
unique_counts = get_xp(da)(_aliases.unique_counts)
|
90 |
+
unique_inverse = get_xp(da)(_aliases.unique_inverse)
|
91 |
+
unique_values = get_xp(da)(_aliases.unique_values)
|
92 |
+
permute_dims = get_xp(da)(_aliases.permute_dims)
|
93 |
+
std = get_xp(da)(_aliases.std)
|
94 |
+
var = get_xp(da)(_aliases.var)
|
95 |
+
empty = get_xp(da)(_aliases.empty)
|
96 |
+
empty_like = get_xp(da)(_aliases.empty_like)
|
97 |
+
full = get_xp(da)(_aliases.full)
|
98 |
+
full_like = get_xp(da)(_aliases.full_like)
|
99 |
+
ones = get_xp(da)(_aliases.ones)
|
100 |
+
ones_like = get_xp(da)(_aliases.ones_like)
|
101 |
+
zeros = get_xp(da)(_aliases.zeros)
|
102 |
+
zeros_like = get_xp(da)(_aliases.zeros_like)
|
103 |
+
reshape = get_xp(da)(_aliases.reshape)
|
104 |
+
matrix_transpose = get_xp(da)(_aliases.matrix_transpose)
|
105 |
+
vecdot = get_xp(da)(_aliases.vecdot)
|
106 |
+
|
107 |
+
nonzero = get_xp(da)(_aliases.nonzero)
|
108 |
+
sum = get_xp(np)(_aliases.sum)
|
109 |
+
prod = get_xp(np)(_aliases.prod)
|
110 |
+
ceil = get_xp(np)(_aliases.ceil)
|
111 |
+
floor = get_xp(np)(_aliases.floor)
|
112 |
+
trunc = get_xp(np)(_aliases.trunc)
|
113 |
+
matmul = get_xp(np)(_aliases.matmul)
|
114 |
+
tensordot = get_xp(np)(_aliases.tensordot)
|
115 |
+
|
116 |
+
from dask.array import (
|
117 |
+
# Element wise aliases
|
118 |
+
arccos as acos,
|
119 |
+
arccosh as acosh,
|
120 |
+
arcsin as asin,
|
121 |
+
arcsinh as asinh,
|
122 |
+
arctan as atan,
|
123 |
+
arctan2 as atan2,
|
124 |
+
arctanh as atanh,
|
125 |
+
left_shift as bitwise_left_shift,
|
126 |
+
right_shift as bitwise_right_shift,
|
127 |
+
invert as bitwise_invert,
|
128 |
+
power as pow,
|
129 |
+
# Other
|
130 |
+
concatenate as concat,
|
131 |
+
)
|
132 |
+
|
133 |
+
# exclude these from all since
|
134 |
+
_da_unsupported = ['sort', 'argsort']
|
135 |
+
|
136 |
+
common_aliases = [alias for alias in _aliases.__all__ if alias not in _da_unsupported]
|
137 |
+
|
138 |
+
__all__ = common_aliases + ['asarray', 'bool', 'acos',
|
139 |
+
'acosh', 'asin', 'asinh', 'atan', 'atan2',
|
140 |
+
'atanh', 'bitwise_left_shift', 'bitwise_invert',
|
141 |
+
'bitwise_right_shift', 'concat', 'pow',
|
142 |
+
'e', 'inf', 'nan', 'pi', 'newaxis', 'float32', 'float64', 'int8',
|
143 |
+
'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64',
|
144 |
+
'complex64', 'complex128', 'iinfo', 'finfo', 'can_cast', 'result_type']
|
145 |
+
|
146 |
+
_all_ignore = ['get_xp', 'da', 'partial', 'common_aliases', 'np']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/dask/array/linalg.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from ...common import _linalg
|
4 |
+
from ..._internal import get_xp
|
5 |
+
|
6 |
+
# Exports
|
7 |
+
from dask.array.linalg import * # noqa: F403
|
8 |
+
from dask.array import trace, outer
|
9 |
+
|
10 |
+
# These functions are in both the main and linalg namespaces
|
11 |
+
from dask.array import matmul, tensordot
|
12 |
+
from ._aliases import matrix_transpose, vecdot
|
13 |
+
|
14 |
+
import dask.array as da
|
15 |
+
|
16 |
+
from typing import TYPE_CHECKING
|
17 |
+
if TYPE_CHECKING:
|
18 |
+
from ...common._typing import Array
|
19 |
+
from typing import Literal
|
20 |
+
|
21 |
+
# dask.array.linalg doesn't have __all__. If it is added, replace this with
|
22 |
+
#
|
23 |
+
# from dask.array.linalg import __all__ as linalg_all
|
24 |
+
_n = {}
|
25 |
+
exec('from dask.array.linalg import *', _n)
|
26 |
+
del _n['__builtins__']
|
27 |
+
if 'annotations' in _n:
|
28 |
+
del _n['annotations']
|
29 |
+
linalg_all = list(_n)
|
30 |
+
del _n
|
31 |
+
|
32 |
+
EighResult = _linalg.EighResult
|
33 |
+
QRResult = _linalg.QRResult
|
34 |
+
SlogdetResult = _linalg.SlogdetResult
|
35 |
+
SVDResult = _linalg.SVDResult
|
36 |
+
# TODO: use the QR wrapper once dask
|
37 |
+
# supports the mode keyword on QR
|
38 |
+
# https://github.com/dask/dask/issues/10388
|
39 |
+
#qr = get_xp(da)(_linalg.qr)
|
40 |
+
def qr(x: Array, mode: Literal['reduced', 'complete'] = 'reduced',
       **kwargs) -> QRResult:
    """QR decomposition via dask, wrapped in a ``QRResult`` namedtuple.

    Dask's ``qr`` has no ``mode`` keyword (see
    https://github.com/dask/dask/issues/10388), so anything other than the
    default 'reduced' mode is rejected up front.
    """
    if mode != "reduced":
        raise ValueError("dask arrays only support using mode='reduced'")
    q, r = da.linalg.qr(x, **kwargs)
    return QRResult(q, r)
|
45 |
+
cholesky = get_xp(da)(_linalg.cholesky)
|
46 |
+
matrix_rank = get_xp(da)(_linalg.matrix_rank)
|
47 |
+
matrix_norm = get_xp(da)(_linalg.matrix_norm)
|
48 |
+
|
49 |
+
|
50 |
+
# Wrap the svd functions to not pass full_matrices to dask
|
51 |
+
# when full_matrices=False (as that is the default behavior for dask),
|
52 |
+
# and dask doesn't have the full_matrices keyword
|
53 |
+
def svd(x: Array, full_matrices: bool = True, **kwargs) -> SVDResult:
    """Singular value decomposition via dask.

    Dask has no ``full_matrices`` keyword and always computes the reduced
    decomposition, so ``full_matrices=True`` (the array API default) cannot
    be honored and is rejected explicitly. ``coerce_signs=False`` prevents
    dask from normalizing the signs of the factors.

    Raises:
        ValueError: if ``full_matrices`` is True.
    """
    if full_matrices:
        # Bug fix: the message previously misspelled "full_matrics".
        raise ValueError("full_matrices=True is not supported by dask.")
    return da.linalg.svd(x, coerce_signs=False, **kwargs)
|
57 |
+
|
58 |
+
def svdvals(x: Array) -> Array:
    """Singular values of ``x``.

    Dask cannot skip computing U and V, so the full reduced SVD is taken
    and only the singular-values component is returned.
    """
    # TODO: can't avoid computing U or V for dask
    return svd(x)[1]
|
62 |
+
|
63 |
+
vector_norm = get_xp(da)(_linalg.vector_norm)
|
64 |
+
diagonal = get_xp(da)(_linalg.diagonal)
|
65 |
+
|
66 |
+
__all__ = linalg_all + ["trace", "outer", "matmul", "tensordot",
|
67 |
+
"matrix_transpose", "vecdot", "EighResult",
|
68 |
+
"QRResult", "SlogdetResult", "SVDResult", "qr",
|
69 |
+
"cholesky", "matrix_rank", "matrix_norm", "svdvals",
|
70 |
+
"vector_norm", "diagonal"]
|
71 |
+
|
72 |
+
_all_ignore = ['get_xp', 'da', 'linalg_all']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from numpy import * # noqa: F403

# from numpy import * doesn't overwrite these builtin names
from numpy import abs, max, min, round # noqa: F401

# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403

# Don't know why, but we have to do an absolute import to import linalg. If we
# instead do
#
# from . import linalg
#
# It doesn't overwrite np.linalg from above. The import is generated
# dynamically so that the library can be vendored.
__import__(__package__ + '.linalg')

__import__(__package__ + '.fft')

# Re-export the two linalg names the top-level array-API namespace requires.
from .linalg import matrix_transpose, vecdot # noqa: F401

from ..common._helpers import * # noqa: F403

# Version of the array API standard this compatibility layer targets.
__array_api_version__ = '2022.12'
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (550 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (2 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (764 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/fft.cpython-310.pyc
ADDED
Binary file (749 Bytes). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (1.94 kB). View file
|
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""NumPy-backed implementations of the array-API aliases.

Binds the generic wrappers from ``..common._aliases`` to the ``numpy``
namespace via ``get_xp`` and defines the simple renames the array API
standard requires.
"""
from __future__ import annotations

from functools import partial

from ..common import _aliases

from .._internal import get_xp

# asarray needs to know its backing namespace; keep a namespace-qualified
# alias as well so callers can disambiguate between backends.
asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy')
asarray.__doc__ = _aliases._asarray.__doc__
del partial

import numpy as np
# The array API's ``bool`` dtype is NumPy's bool_ (shadows the builtin here
# on purpose — this module is a namespace, not ordinary code).
bool = np.bool_

# Basic renames
acos = np.arccos
acosh = np.arccosh
asin = np.arcsin
asinh = np.arcsinh
atan = np.arctan
atan2 = np.arctan2
atanh = np.arctanh
bitwise_left_shift = np.left_shift
bitwise_invert = np.invert
bitwise_right_shift = np.right_shift
concat = np.concatenate
pow = np.power

# Generic array-API wrappers, bound to the numpy namespace.
arange = get_xp(np)(_aliases.arange)
empty = get_xp(np)(_aliases.empty)
empty_like = get_xp(np)(_aliases.empty_like)
eye = get_xp(np)(_aliases.eye)
full = get_xp(np)(_aliases.full)
full_like = get_xp(np)(_aliases.full_like)
linspace = get_xp(np)(_aliases.linspace)
ones = get_xp(np)(_aliases.ones)
ones_like = get_xp(np)(_aliases.ones_like)
zeros = get_xp(np)(_aliases.zeros)
zeros_like = get_xp(np)(_aliases.zeros_like)
UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult)
unique_all = get_xp(np)(_aliases.unique_all)
unique_counts = get_xp(np)(_aliases.unique_counts)
unique_inverse = get_xp(np)(_aliases.unique_inverse)
unique_values = get_xp(np)(_aliases.unique_values)
astype = _aliases.astype
std = get_xp(np)(_aliases.std)
var = get_xp(np)(_aliases.var)
permute_dims = get_xp(np)(_aliases.permute_dims)
reshape = get_xp(np)(_aliases.reshape)
argsort = get_xp(np)(_aliases.argsort)
sort = get_xp(np)(_aliases.sort)
nonzero = get_xp(np)(_aliases.nonzero)
sum = get_xp(np)(_aliases.sum)
prod = get_xp(np)(_aliases.prod)
ceil = get_xp(np)(_aliases.ceil)
floor = get_xp(np)(_aliases.floor)
trunc = get_xp(np)(_aliases.trunc)
matmul = get_xp(np)(_aliases.matmul)
matrix_transpose = get_xp(np)(_aliases.matrix_transpose)
tensordot = get_xp(np)(_aliases.tensordot)

# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(np, 'vecdot'):
    vecdot = np.vecdot
else:
    vecdot = get_xp(np)(_aliases.vecdot)
if hasattr(np, 'isdtype'):
    isdtype = np.isdtype
else:
    isdtype = get_xp(np)(_aliases.isdtype)

# Everything the common layer exports, plus the names defined above.
__all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos',
                              'acosh', 'asin', 'asinh', 'atan', 'atan2',
                              'atanh', 'bitwise_left_shift', 'bitwise_invert',
                              'bitwise_right_shift', 'concat', 'pow']

# NOTE(review): presumably internal names excluded from an
# "everything-in-__all__" consistency check elsewhere — confirm.
_all_ignore = ['np', 'get_xp']
|
emissary-ml/llm-scripts/fine-tuning/llama3/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Typing helpers for the NumPy array-API compatibility layer."""
from __future__ import annotations

__all__ = [
    "ndarray",
    "Device",
    "Dtype",
]

import sys
from typing import Literal, TYPE_CHECKING, Union

from numpy import (
    ndarray,
    dtype,
    int8, int16, int32, int64,
    uint8, uint16, uint32, uint64,
    float32, float64,
)

# NumPy arrays only ever live on the CPU, so the device type is a single
# literal value.
Device = Literal["cpu"]

# Subscripting ``dtype[...]`` at runtime needs Python >= 3.9; static type
# checkers understand it regardless of interpreter version.
if TYPE_CHECKING or sys.version_info >= (3, 9):
    Dtype = dtype[Union[
        int8, int16, int32, int64,
        uint8, uint16, uint32, uint64,
        float32, float64,
    ]]
else:
    Dtype = dtype
|